Merge pull request #29 from greatest-ape/udp-sharded-state

aquatic_udp: remove glommio implementation, shard torrent state by request worker, improve aquatic_udp_load_test
This commit is contained in:
Joakim Frostegård 2021-11-19 15:39:53 +01:00 committed by GitHub
commit 7a07239c04
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
50 changed files with 2080 additions and 2577 deletions

View file

@ -19,7 +19,7 @@ fi
ulimit -a ulimit -a
$SUDO apt-get update $SUDO apt-get update
$SUDO apt-get install -y cmake libssl-dev screen rtorrent mktorrent ssl-cert ca-certificates curl golang $SUDO apt-get install -y cmake libssl-dev screen rtorrent mktorrent ssl-cert ca-certificates curl golang libhwloc-dev
git clone https://github.com/anacrolix/torrent.git gotorrent git clone https://github.com/anacrolix/torrent.git gotorrent
cd gotorrent cd gotorrent

View file

@ -15,10 +15,11 @@ jobs:
timeout-minutes: 10 timeout-minutes: 10
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
- name: Install dependencies
run: sudo apt-get update -y && sudo apt-get install libhwloc-dev -y
- name: Build - name: Build
run: | run: |
cargo build --verbose -p aquatic_udp --features "cpu-pinning" cargo build --verbose -p aquatic_udp --features "cpu-pinning"
cargo build --verbose -p aquatic_udp --features "with-glommio cpu-pinning" --no-default-features
cargo build --verbose -p aquatic_http --features "cpu-pinning" cargo build --verbose -p aquatic_http --features "cpu-pinning"

201
Cargo.lock generated
View file

@ -4,9 +4,9 @@ version = 3
[[package]] [[package]]
name = "addr2line" name = "addr2line"
version = "0.16.0" version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd" checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b"
dependencies = [ dependencies = [
"gimli", "gimli",
] ]
@ -17,24 +17,6 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "affinity"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "763e484feceb7dd021b21c5c6f81aee06b1594a743455ec7efbf72e6355e447b"
dependencies = [
"cfg-if",
"errno",
"libc",
"num_cpus",
]
[[package]]
name = "ahash"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217"
[[package]] [[package]]
name = "ahash" name = "ahash"
version = "0.7.6" version = "0.7.6"
@ -57,9 +39,9 @@ dependencies = [
[[package]] [[package]]
name = "anyhow" name = "anyhow"
version = "1.0.44" version = "1.0.45"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61604a8f862e1d5c3229fdd78f8b02c68dcf73a4c4b05fd636d12240aaa242c1" checksum = "ee10e43ae4a853c0a3591d4e2ada1719e553be18199d9da9d4a83f5927c2f5c7"
[[package]] [[package]]
name = "aquatic" name = "aquatic"
@ -86,13 +68,14 @@ dependencies = [
name = "aquatic_common" name = "aquatic_common"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"affinity", "ahash",
"ahash 0.7.6",
"anyhow", "anyhow",
"arc-swap", "arc-swap",
"hashbrown 0.11.2", "hashbrown 0.11.2",
"hex", "hex",
"hwloc",
"indexmap-amortized", "indexmap-amortized",
"libc",
"log", "log",
"privdrop", "privdrop",
"rand", "rand",
@ -181,10 +164,7 @@ dependencies = [
"aquatic_udp_protocol", "aquatic_udp_protocol",
"cfg-if", "cfg-if",
"crossbeam-channel", "crossbeam-channel",
"futures-lite",
"glommio",
"hex", "hex",
"histogram",
"log", "log",
"mimalloc", "mimalloc",
"mio", "mio",
@ -194,6 +174,7 @@ dependencies = [
"rand", "rand",
"serde", "serde",
"signal-hook", "signal-hook",
"slab",
"socket2 0.4.2", "socket2 0.4.2",
] ]
@ -204,6 +185,7 @@ dependencies = [
"anyhow", "anyhow",
"aquatic_cli_helpers", "aquatic_cli_helpers",
"aquatic_udp", "aquatic_udp",
"aquatic_udp_protocol",
"crossbeam-channel", "crossbeam-channel",
"indicatif", "indicatif",
"mimalloc", "mimalloc",
@ -221,11 +203,9 @@ dependencies = [
"aquatic_cli_helpers", "aquatic_cli_helpers",
"aquatic_common", "aquatic_common",
"aquatic_udp_protocol", "aquatic_udp_protocol",
"crossbeam-channel",
"hashbrown 0.11.2", "hashbrown 0.11.2",
"mimalloc", "mimalloc",
"mio", "mio",
"parking_lot",
"quickcheck", "quickcheck",
"quickcheck_macros", "quickcheck_macros",
"rand", "rand",
@ -320,9 +300,9 @@ dependencies = [
[[package]] [[package]]
name = "arc-swap" name = "arc-swap"
version = "1.4.0" version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6df5aef5c5830360ce5218cecb8f018af3438af5686ae945094affc86fdec63" checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f"
[[package]] [[package]]
name = "arrayvec" name = "arrayvec"
@ -371,9 +351,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
[[package]] [[package]]
name = "backtrace" name = "backtrace"
version = "0.3.61" version = "0.3.63"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7a905d892734eea339e896738c14b9afce22b5318f64b951e70bf3844419b01" checksum = "321629d8ba6513061f26707241fa9bc89524ff1cd7a915a97ef0c62c666ce1b6"
dependencies = [ dependencies = [
"addr2line", "addr2line",
"cc", "cc",
@ -401,6 +381,12 @@ dependencies = [
"serde_bytes", "serde_bytes",
] ]
[[package]]
name = "bitflags"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d"
[[package]] [[package]]
name = "bitflags" name = "bitflags"
version = "1.3.2" version = "1.3.2"
@ -442,9 +428,9 @@ checksum = "8ff9f338986406db85e2b5deb40a9255b796ca03a194c7457403d215173f3fd5"
[[package]] [[package]]
name = "bumpalo" name = "bumpalo"
version = "3.7.1" version = "3.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9df67f7bf9ef8498769f994239c45613ef0c5899415fb58e9add412d2c1a538" checksum = "8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c"
[[package]] [[package]]
name = "byteorder" name = "byteorder"
@ -475,9 +461,9 @@ dependencies = [
[[package]] [[package]]
name = "cc" name = "cc"
version = "1.0.71" version = "1.0.72"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "79c2681d6594606957bbb8631c4b90a7fcaaa72cdb714743a437b156d6a7eedd" checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee"
[[package]] [[package]]
name = "cfg-if" name = "cfg-if"
@ -504,7 +490,7 @@ version = "2.33.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
dependencies = [ dependencies = [
"bitflags", "bitflags 1.3.2",
"textwrap", "textwrap",
"unicode-width", "unicode-width",
] ]
@ -773,9 +759,9 @@ dependencies = [
[[package]] [[package]]
name = "float-cmp" name = "float-cmp"
version = "0.8.0" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1267f4ac4f343772758f7b1bdcbe767c218bbab93bb432acbf5162bbf85a6c4" checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4"
dependencies = [ dependencies = [
"num-traits", "num-traits",
] ]
@ -954,17 +940,17 @@ dependencies = [
[[package]] [[package]]
name = "gimli" name = "gimli"
version = "0.25.0" version = "0.26.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7" checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4"
[[package]] [[package]]
name = "glommio" name = "glommio"
version = "0.6.0" version = "0.6.0"
source = "git+https://github.com/DataDog/glommio.git?rev=4e6b14772da2f4325271fbcf12d24cf91ed466e5#4e6b14772da2f4325271fbcf12d24cf91ed466e5" source = "git+https://github.com/DataDog/glommio.git?rev=4e6b14772da2f4325271fbcf12d24cf91ed466e5#4e6b14772da2f4325271fbcf12d24cf91ed466e5"
dependencies = [ dependencies = [
"ahash 0.7.6", "ahash",
"bitflags", "bitflags 1.3.2",
"bitmaps", "bitmaps",
"buddy-alloc", "buddy-alloc",
"cc", "cc",
@ -1000,30 +986,20 @@ dependencies = [
[[package]] [[package]]
name = "half" name = "half"
version = "1.8.0" version = "1.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac5956d4e63858efaec57e0d6c1c2f6a41e1487f830314a324ccd7e2223a7ca0" checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"
[[package]] [[package]]
name = "halfbrown" name = "halfbrown"
version = "0.1.11" version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c12499524b5585419ab2f51545a19b842263a373580a83c0eb98a0142a260a10" checksum = "3ed39577259d319b81a15176a32673271be2786cb463889703c58c90fe83c825"
dependencies = [ dependencies = [
"hashbrown 0.7.2", "hashbrown 0.11.2",
"serde", "serde",
] ]
[[package]]
name = "hashbrown"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96282e96bfcd3da0d3aa9938bedf1e50df3269b6db08b4876d2da0bb1a0841cf"
dependencies = [
"ahash 0.3.8",
"autocfg",
]
[[package]] [[package]]
name = "hashbrown" name = "hashbrown"
version = "0.9.1" version = "0.9.1"
@ -1036,7 +1012,7 @@ version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
dependencies = [ dependencies = [
"ahash 0.7.6", "ahash",
"serde", "serde",
] ]
@ -1078,6 +1054,21 @@ version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503"
[[package]]
name = "hwloc"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2934f84993b8b4bcae9b6a4e5f0aca638462dda9c7b4f26a570241494f21e0f4"
dependencies = [
"bitflags 0.7.0",
"errno",
"kernel32-sys",
"libc",
"num",
"pkg-config",
"winapi 0.2.8",
]
[[package]] [[package]]
name = "idna" name = "idna"
version = "0.2.3" version = "0.2.3"
@ -1114,9 +1105,9 @@ dependencies = [
[[package]] [[package]]
name = "instant" name = "instant"
version = "0.1.11" version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "716d3d89f35ac6a34fd0eed635395f4c3b76fa889338a4632e5231a8684216bd" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
dependencies = [ dependencies = [
"cfg-if", "cfg-if",
] ]
@ -1172,9 +1163,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]] [[package]]
name = "libc" name = "libc"
version = "0.2.103" version = "0.2.107"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd8f7255a17a627354f321ef0055d63b898c6fb27eff628af4d1b66b7331edf6" checksum = "fbe5e23404da5b4f555ef85ebed98fb4083e55a00c317800bc2a50ede9f3d219"
[[package]] [[package]]
name = "libm" name = "libm"
@ -1184,9 +1175,9 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a"
[[package]] [[package]]
name = "libmimalloc-sys" name = "libmimalloc-sys"
version = "0.1.22" version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d1b8479c593dba88c2741fc50b92e13dbabbbe0bd504d979f244ccc1a5b1c01" checksum = "9636c194f9db483f4d0adf2f99a65011a99f904bd222bbd67fb4df4f37863c30"
dependencies = [ dependencies = [
"cc", "cc",
] ]
@ -1262,9 +1253,9 @@ dependencies = [
[[package]] [[package]]
name = "mimalloc" name = "mimalloc"
version = "0.1.26" version = "0.1.27"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb74897ce508e6c49156fd1476fc5922cbc6e75183c65e399c765a09122e5130" checksum = "cf5f78c1d9892fb5677a8b2f543f967ab891ac0f71feecd961435b74f877283a"
dependencies = [ dependencies = [
"libmimalloc-sys", "libmimalloc-sys",
] ]
@ -1281,9 +1272,9 @@ dependencies = [
[[package]] [[package]]
name = "mio" name = "mio"
version = "0.7.13" version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c2bdb6314ec10835cd3293dd268473a835c02b7b352e788be788b3c6ca6bb16" checksum = "ba272f85fa0b41fc91872be579b3bbe0f56b792aa361a380eb669469f68dafb2"
dependencies = [ dependencies = [
"libc", "libc",
"log", "log",
@ -1325,7 +1316,7 @@ version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f305c2c2e4c39a82f7bf0bf65fb557f9070ce06781d4f2454295cc34b1c43188" checksum = "f305c2c2e4c39a82f7bf0bf65fb557f9070ce06781d4f2454295cc34b1c43188"
dependencies = [ dependencies = [
"bitflags", "bitflags 1.3.2",
"cc", "cc",
"cfg-if", "cfg-if",
"libc", "libc",
@ -1347,6 +1338,17 @@ dependencies = [
"winapi 0.3.9", "winapi 0.3.9",
] ]
[[package]]
name = "num"
version = "0.1.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4703ad64153382334aa8db57c637364c322d3372e097840c72000dabdcf6156e"
dependencies = [
"num-integer",
"num-iter",
"num-traits",
]
[[package]] [[package]]
name = "num-format" name = "num-format"
version = "0.4.0" version = "0.4.0"
@ -1367,6 +1369,17 @@ dependencies = [
"num-traits", "num-traits",
] ]
[[package]]
name = "num-iter"
version = "0.1.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2021c8337a54d21aca0d59a92577a029af9431cb59b909b03252b9c164fad59"
dependencies = [
"autocfg",
"num-integer",
"num-traits",
]
[[package]] [[package]]
name = "num-traits" name = "num-traits"
version = "0.2.14" version = "0.2.14"
@ -1395,9 +1408,9 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3"
[[package]] [[package]]
name = "object" name = "object"
version = "0.26.2" version = "0.27.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39f37e50073ccad23b6d09bcb5b263f4e76d3bb6038e4a3c08e52162ffa8abc2" checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9"
dependencies = [ dependencies = [
"memchr", "memchr",
] ]
@ -1426,7 +1439,7 @@ version = "0.10.38"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c7ae222234c30df141154f159066c5093ff73b63204dcda7121eb082fc56a95" checksum = "0c7ae222234c30df141154f159066c5093ff73b63204dcda7121eb082fc56a95"
dependencies = [ dependencies = [
"bitflags", "bitflags 1.3.2",
"cfg-if", "cfg-if",
"foreign-types", "foreign-types",
"libc", "libc",
@ -1442,9 +1455,9 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a"
[[package]] [[package]]
name = "openssl-sys" name = "openssl-sys"
version = "0.9.70" version = "0.9.71"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c6517987b3f8226b5da3661dad65ff7f300cc59fb5ea8333ca191fc65fde3edf" checksum = "7df13d165e607909b363a4757a6f133f8a818a74e9d3a98d09c6128e15fa4c73"
dependencies = [ dependencies = [
"autocfg", "autocfg",
"cc", "cc",
@ -1544,9 +1557,9 @@ dependencies = [
[[package]] [[package]]
name = "ppv-lite86" name = "ppv-lite86"
version = "0.2.14" version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3ca011bd0129ff4ae15cd04c4eef202cadf6c51c21e47aba319b4e0501db741" checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba"
[[package]] [[package]]
name = "privdrop" name = "privdrop"
@ -1572,9 +1585,9 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086"
[[package]] [[package]]
name = "proc-macro2" name = "proc-macro2"
version = "1.0.30" version = "1.0.32"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "edc3358ebc67bc8b7fa0c007f945b0b18226f78437d61bec735a9eb96b61ee70" checksum = "ba508cc11742c0dc5c1659771673afbab7a0efab23aa17e854cbab0837ed0b43"
dependencies = [ dependencies = [
"unicode-xid", "unicode-xid",
] ]
@ -1691,7 +1704,7 @@ version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff"
dependencies = [ dependencies = [
"bitflags", "bitflags 1.3.2",
] ]
[[package]] [[package]]
@ -1767,9 +1780,9 @@ dependencies = [
[[package]] [[package]]
name = "rustls" name = "rustls"
version = "0.20.0" version = "0.20.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b5ac6078ca424dc1d3ae2328526a76787fecc7f8011f520e3276730e711fc95" checksum = "dac4581f0fc0e0efd529d069e8189ec7b90b8e7680e21beb35141bdc45f36040"
dependencies = [ dependencies = [
"log", "log",
"ring", "ring",
@ -1839,7 +1852,7 @@ version = "2.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "525bc1abfda2e1998d152c45cf13e696f76d0a4972310b22fac1658b05df7c87" checksum = "525bc1abfda2e1998d152c45cf13e696f76d0a4972310b22fac1658b05df7c87"
dependencies = [ dependencies = [
"bitflags", "bitflags 1.3.2",
"core-foundation", "core-foundation",
"core-foundation-sys", "core-foundation-sys",
"libc", "libc",
@ -1913,9 +1926,9 @@ dependencies = [
[[package]] [[package]]
name = "serde_json" name = "serde_json"
version = "1.0.68" version = "1.0.71"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8" checksum = "063bf466a64011ac24040a49009724ee60a57da1b437617ceb32e53ad61bfb19"
dependencies = [ dependencies = [
"itoa", "itoa",
"ryu", "ryu",
@ -1975,9 +1988,9 @@ checksum = "c970da16e7c682fa90a261cf0724dee241c9f7831635ecc4e988ae8f3b505559"
[[package]] [[package]]
name = "simplelog" name = "simplelog"
version = "0.10.2" version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85d04ae642154220ef00ee82c36fb07853c10a4f2a0ca6719f9991211d2eb959" checksum = "8baa24de25f3092d9697c76f94cf09f67fca13db2ea11ce80c2f055c1aaf0795"
dependencies = [ dependencies = [
"chrono", "chrono",
"log", "log",
@ -2040,9 +2053,9 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]] [[package]]
name = "syn" name = "syn"
version = "1.0.80" version = "1.0.81"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d010a1623fbd906d51d650a9916aaefc05ffa0e4053ff7fe601167f3e715d194" checksum = "f2afee18b8beb5a596ecb4a2dce128c719b4ba399d34126b9e4396e3f9860966"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@ -2145,9 +2158,9 @@ dependencies = [
[[package]] [[package]]
name = "tinyvec" name = "tinyvec"
version = "1.5.0" version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f83b2a3d4d9091d0abd7eba4dc2710b1718583bd4d8992e2190720ea38f391f7" checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2"
dependencies = [ dependencies = [
"tinyvec_macros", "tinyvec_macros",
] ]
@ -2283,9 +2296,9 @@ checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9"
[[package]] [[package]]
name = "value-trait" name = "value-trait"
version = "0.2.8" version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b637f98040dfa411b01a85b238a8cadbd797b303c23007157dee4bbbd3a72af" checksum = "0393efdd7d82f856a927b0fcafa80bca45911f5c89ef6b9d80197bebc284f72e"
dependencies = [ dependencies = [
"float-cmp", "float-cmp",
"halfbrown", "halfbrown",

View file

@ -15,7 +15,7 @@ of sub-implementations for different protocols:
| Name | Protocol | OS requirements | | Name | Protocol | OS requirements |
|--------------|--------------------------------------------|------------------------------------------------------------| |--------------|--------------------------------------------|------------------------------------------------------------|
| aquatic_udp | [BitTorrent over UDP] | Unix-like with [mio] (default) / Linux 5.8+ with [glommio] | | aquatic_udp | [BitTorrent over UDP] | Unix-like |
| aquatic_http | [BitTorrent over HTTP] with TLS ([rustls]) | Linux 5.8+ | | aquatic_http | [BitTorrent over HTTP] with TLS ([rustls]) | Linux 5.8+ |
| aquatic_ws | [WebTorrent] | Unix-like with [mio] (default) / Linux 5.8+ with [glommio] | | aquatic_ws | [WebTorrent] | Unix-like with [mio] (default) / Linux 5.8+ with [glommio] |
@ -25,8 +25,8 @@ of sub-implementations for different protocols:
- Install Rust with [rustup](https://rustup.rs/) (stable is recommended) - Install Rust with [rustup](https://rustup.rs/) (stable is recommended)
- Install cmake with your package manager (e.g., `apt-get install cmake`) - Install cmake with your package manager (e.g., `apt-get install cmake`)
- Unless you're planning to only run aquatic_udp and only the cross-platform, - Unless you're planning to only run the cross-platform mio based
mio based implementation, make sure locked memory limits are sufficient. implementations, make sure locked memory limits are sufficient.
You can do this by adding the following lines to `/etc/security/limits.conf`, You can do this by adding the following lines to `/etc/security/limits.conf`,
and then logging out and back in: and then logging out and back in:
@ -48,7 +48,6 @@ Compile the implementations that you are interested in:
. ./scripts/env-native-cpu-without-avx-512 . ./scripts/env-native-cpu-without-avx-512
cargo build --release -p aquatic_udp cargo build --release -p aquatic_udp
cargo build --release -p aquatic_udp --features "with-glommio" --no-default-features
cargo build --release -p aquatic_http cargo build --release -p aquatic_http
cargo build --release -p aquatic_ws cargo build --release -p aquatic_ws
cargo build --release -p aquatic_ws --features "with-glommio" --no-default-features cargo build --release -p aquatic_ws --features "with-glommio" --no-default-features
@ -119,16 +118,7 @@ except that it:
source IP is always used. source IP is always used.
* Doesn't track of the number of torrent downloads (0 is always sent). * Doesn't track of the number of torrent downloads (0 is always sent).
Supports IPv4 and IPv6 (BitTorrent UDP protocol doesn't support IPv6 very well, Supports IPv4 and IPv6.
however.)
#### Alternative implementation using io_uring
[io_uring]: https://en.wikipedia.org/wiki/Io_uring
[glommio]: https://github.com/DataDog/glommio
There is an alternative implementation that utilizes [io_uring] by running on
[glommio]. It only runs on Linux and requires a recent kernel (version 5.8 or later).
#### Performance #### Performance
@ -136,6 +126,15 @@ There is an alternative implementation that utilizes [io_uring] by running on
More details are available [here](./documents/aquatic-udp-load-test-2021-11-08.pdf). More details are available [here](./documents/aquatic-udp-load-test-2021-11-08.pdf).
Since making this benchmark, I have improved the mio-based implementation
considerably and removed the glommio-based implementation.
#### Optimisation attempts that didn't work out
* Using glommio
* Using io-uring
* Using zerocopy + vectored sends for responses
### aquatic_http: HTTP BitTorrent tracker ### aquatic_http: HTTP BitTorrent tracker
[HTTP BitTorrent protocol]: https://wiki.theory.org/index.php/BitTorrentSpecification#Tracker_HTTP.2FHTTPS_Protocol [HTTP BitTorrent protocol]: https://wiki.theory.org/index.php/BitTorrentSpecification#Tracker_HTTP.2FHTTPS_Protocol

13
TODO.md
View file

@ -17,13 +17,14 @@
* cargo-deny * cargo-deny
* aquatic_udp * aquatic_udp
* glommio * look at proper cpu pinning (check that one thread gets bound per core)
* consider sending local responses immediately * then consider so_attach_reuseport_cbpf
* consider adding ConnectedScrapeRequest::Scrape(PendingScrapeRequest) * what poll event capacity is actually needed?
containing TransactionId and BTreeMap<usize, InfoHash>, and same for
response
* mio
* stagger connection cleaning intervals? * stagger connection cleaning intervals?
* notes
* load testing shows that with sharded state, mio reaches 1.4M responses per second
with 6 socket and 4 request workers. performance is great overall and faster than
without sharding. io_uring impl is a lot behind mio impl with new load tester
* aquatic_http: * aquatic_http:
* clean out connections regularly * clean out connections regularly

View file

@ -10,5 +10,5 @@ repository = "https://github.com/greatest-ape/aquatic"
[dependencies] [dependencies]
anyhow = "1" anyhow = "1"
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
simplelog = "0.10.0" simplelog = "0.11"
toml = "0.5" toml = "0.5"

View file

@ -11,13 +11,13 @@ repository = "https://github.com/greatest-ape/aquatic"
name = "aquatic_common" name = "aquatic_common"
[features] [features]
cpu-pinning = ["affinity"] cpu-pinning = ["hwloc", "libc"]
[dependencies] [dependencies]
ahash = "0.7" ahash = "0.7"
anyhow = "1" anyhow = "1"
arc-swap = "1" arc-swap = "1"
hashbrown = "0.11.2" hashbrown = "0.11"
hex = "0.4" hex = "0.4"
indexmap-amortized = "1" indexmap-amortized = "1"
log = "0.4" log = "0.4"
@ -25,4 +25,6 @@ privdrop = "0.5"
rand = { version = "0.8", features = ["small_rng"] } rand = { version = "0.8", features = ["small_rng"] }
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
affinity = { version = "0.1", optional = true } # cpu-pinning
hwloc = { version = "0.5", optional = true }
libc = { version = "0.2", optional = true }

View file

@ -77,6 +77,10 @@ impl AccessList {
AccessListMode::Off => true, AccessListMode::Off => true,
} }
} }
pub fn len(&self) -> usize {
self.0.len()
}
} }
pub trait AccessListQuery { pub trait AccessListQuery {

View file

@ -1,3 +1,4 @@
use hwloc::{CpuSet, ObjectType, Topology, CPUBIND_THREAD};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
@ -17,8 +18,7 @@ impl Default for CpuPinningMode {
pub struct CpuPinningConfig { pub struct CpuPinningConfig {
pub active: bool, pub active: bool,
pub mode: CpuPinningMode, pub mode: CpuPinningMode,
pub virtual_per_physical_cpu: usize, pub core_offset: usize,
pub offset_cpus: usize,
} }
impl Default for CpuPinningConfig { impl Default for CpuPinningConfig {
@ -26,8 +26,7 @@ impl Default for CpuPinningConfig {
Self { Self {
active: false, active: false,
mode: Default::default(), mode: Default::default(),
virtual_per_physical_cpu: 2, core_offset: 0,
offset_cpus: 0,
} }
} }
} }
@ -49,58 +48,161 @@ pub enum WorkerIndex {
} }
impl WorkerIndex { impl WorkerIndex {
fn get_cpu_indices(self, config: &CpuPinningConfig, socket_workers: usize) -> Vec<usize> { fn get_core_index(
let offset = match self { self,
Self::Other => config.virtual_per_physical_cpu * config.offset_cpus, config: &CpuPinningConfig,
Self::SocketWorker(index) => { socket_workers: usize,
config.virtual_per_physical_cpu * (config.offset_cpus + 1 + index) core_count: usize,
} ) -> usize {
Self::RequestWorker(index) => { let ascending_index = match self {
config.virtual_per_physical_cpu * (config.offset_cpus + 1 + socket_workers + index) Self::Other => config.core_offset,
} Self::SocketWorker(index) => config.core_offset + 1 + index,
Self::RequestWorker(index) => config.core_offset + 1 + socket_workers + index,
}; };
let virtual_cpus = (0..config.virtual_per_physical_cpu).map(|i| offset + i); match config.mode {
CpuPinningMode::Ascending => ascending_index,
let virtual_cpus: Vec<usize> = match config.mode { CpuPinningMode::Descending => core_count - 1 - ascending_index,
CpuPinningMode::Ascending => virtual_cpus.collect(),
CpuPinningMode::Descending => {
let max_index = affinity::get_core_num() - 1;
virtual_cpus
.map(|i| max_index.checked_sub(i).unwrap_or(0))
.collect()
} }
};
::log::info!(
"Calculated virtual CPU pin indices {:?} for {:?}",
virtual_cpus,
self
);
virtual_cpus
} }
} }
/// Note: don't call this when affinities were already set in the current or in /// Pin current thread to a suitable core
/// a parent thread. Doing so limits the number of cores that are seen and ///
/// messes up setting affinities. /// Requires hwloc (`apt-get install libhwloc-dev`)
pub fn pin_current_if_configured_to( pub fn pin_current_if_configured_to(
config: &CpuPinningConfig, config: &CpuPinningConfig,
socket_workers: usize, socket_workers: usize,
worker_index: WorkerIndex, worker_index: WorkerIndex,
) { ) {
if config.active { if config.active {
let indices = worker_index.get_cpu_indices(config, socket_workers); let mut topology = Topology::new();
if let Err(err) = affinity::set_thread_affinity(indices.clone()) { let core_cpu_sets: Vec<CpuSet> = topology
::log::error!( .objects_with_type(&ObjectType::Core)
"Failed setting thread affinities {:?} for {:?}: {:#?}", .expect("hwloc: list cores")
indices, .into_iter()
.map(|core| core.allowed_cpuset().expect("hwloc: get core cpu set"))
.collect();
let core_index = worker_index.get_core_index(config, socket_workers, core_cpu_sets.len());
let cpu_set = core_cpu_sets
.get(core_index)
.expect(&format!("get cpu set for core {}", core_index))
.to_owned();
topology
.set_cpubind(cpu_set, CPUBIND_THREAD)
.expect(&format!("bind thread to core {}", core_index));
::log::info!(
"Pinned worker {:?} to cpu core {}",
worker_index, worker_index,
err core_index
); );
} }
} }
/// Tell Linux that incoming messages should be handled by the socket worker
/// with the same index as the CPU core receiving the interrupt.
///
/// Requires that sockets are actually bound in order, so waiting has to be done
/// in socket workers.
///
/// It might make sense to first enable RSS or RPS (if hardware doesn't support
/// RSS) and enable sending interrupts to all CPUs that have socket workers
/// running on them. Possibly, CPU 0 should be excluded.
///
/// More Information:
/// - https://talawah.io/blog/extreme-http-performance-tuning-one-point-two-million/
/// - https://www.kernel.org/doc/Documentation/networking/scaling.txt
/// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/performance_tuning_guide/network-rps
#[cfg(target_os = "linux")]
pub fn socket_attach_cbpf<S: ::std::os::unix::prelude::AsRawFd>(
socket: &S,
_num_sockets: usize,
) -> ::std::io::Result<()> {
use std::mem::size_of;
use std::os::raw::c_void;
use libc::{setsockopt, sock_filter, sock_fprog, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF};
// Good BPF documentation: https://man.openbsd.org/bpf.4
// Values of constants were copied from the following Linux source files:
// - include/uapi/linux/bpf_common.h
// - include/uapi/linux/filter.h
// Instruction
const BPF_LD: u16 = 0x00; // Load into A
// const BPF_LDX: u16 = 0x01; // Load into X
// const BPF_ALU: u16 = 0x04; // Load into X
const BPF_RET: u16 = 0x06; // Return value
// const BPF_MOD: u16 = 0x90; // Run modulo on A
// Size
const BPF_W: u16 = 0x00; // 32-bit width
// Source
// const BPF_IMM: u16 = 0x00; // Use constant (k)
const BPF_ABS: u16 = 0x20;
// Registers
// const BPF_K: u16 = 0x00;
const BPF_A: u16 = 0x10;
// k
const SKF_AD_OFF: i32 = -0x1000; // Activate extensions
const SKF_AD_CPU: i32 = 36; // Extension for getting CPU
// Return index of socket that should receive packet
let mut filter = [
// Store index of CPU receiving packet in register A
sock_filter {
code: BPF_LD | BPF_W | BPF_ABS,
jt: 0,
jf: 0,
k: u32::from_ne_bytes((SKF_AD_OFF + SKF_AD_CPU).to_ne_bytes()),
},
/* Disabled, because it doesn't make a lot of sense
// Run A = A % socket_workers
sock_filter {
code: BPF_ALU | BPF_MOD,
jt: 0,
jf: 0,
k: num_sockets as u32,
},
*/
// Return A
sock_filter {
code: BPF_RET | BPF_A,
jt: 0,
jf: 0,
k: 0,
},
];
let program = sock_fprog {
filter: filter.as_mut_ptr(),
len: filter.len() as u16,
};
let program_ptr: *const sock_fprog = &program;
unsafe {
let result = setsockopt(
socket.as_raw_fd(),
SOL_SOCKET,
SO_ATTACH_REUSEPORT_CBPF,
program_ptr as *const c_void,
size_of::<sock_fprog>() as u32,
);
if result != 0 {
Err(::std::io::Error::last_os_error())
} else {
Ok(())
}
}
} }

View file

@ -23,6 +23,9 @@ impl ValidUntil {
pub fn new(offset_seconds: u64) -> Self { pub fn new(offset_seconds: u64) -> Self {
Self(Instant::now() + Duration::from_secs(offset_seconds)) Self(Instant::now() + Duration::from_secs(offset_seconds))
} }
pub fn new_with_now(now: Instant, offset_seconds: u64) -> Self {
Self(now + Duration::from_secs(offset_seconds))
}
} }
/// Extract response peers /// Extract response peers

View file

@ -42,5 +42,5 @@ slab = "0.4"
smartstring = "0.2" smartstring = "0.2"
[dev-dependencies] [dev-dependencies]
quickcheck = "1.0" quickcheck = "1"
quickcheck_macros = "1.0" quickcheck_macros = "1"

View file

@ -18,7 +18,7 @@ aquatic_cli_helpers = "0.1.0"
aquatic_common = "0.1.0" aquatic_common = "0.1.0"
aquatic_http_protocol = "0.1.0" aquatic_http_protocol = "0.1.0"
futures-lite = "1" futures-lite = "1"
hashbrown = "0.11.2" hashbrown = "0.11"
glommio = { git = "https://github.com/DataDog/glommio.git", rev = "4e6b14772da2f4325271fbcf12d24cf91ed466e5" } glommio = { git = "https://github.com/DataDog/glommio.git", rev = "4e6b14772da2f4325271fbcf12d24cf91ed466e5" }
log = "0.4" log = "0.4"
mimalloc = { version = "0.1", default-features = false } mimalloc = { version = "0.1", default-features = false }
@ -28,5 +28,5 @@ rustls = { version = "0.20", features = ["dangerous_configuration"] }
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
[dev-dependencies] [dev-dependencies]
quickcheck = "1.0" quickcheck = "1"
quickcheck_macros = "1.0" quickcheck_macros = "1"

View file

@ -23,7 +23,7 @@ harness = false
[dependencies] [dependencies]
anyhow = "1" anyhow = "1"
hashbrown = "0.11.2" hashbrown = "0.11"
hex = { version = "0.4", default-features = false } hex = { version = "0.4", default-features = false }
httparse = "1" httparse = "1"
itoa = "0.4" itoa = "0.4"
@ -33,10 +33,10 @@ rand = { version = "0.8", features = ["small_rng"] }
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
serde_bencode = "0.2" serde_bencode = "0.2"
smartstring = "0.2" smartstring = "0.2"
urlencoding = "2.1.0" urlencoding = "2"
[dev-dependencies] [dev-dependencies]
bendy = { version = "0.3", features = ["std", "serde"] } bendy = { version = "0.3", features = ["std", "serde"] }
criterion = "0.3" criterion = "0.3"
quickcheck = "1.0" quickcheck = "1"
quickcheck_macros = "1.0" quickcheck_macros = "1"

View file

@ -15,10 +15,7 @@ path = "src/lib/lib.rs"
name = "aquatic_udp" name = "aquatic_udp"
[features] [features]
default = ["with-mio"]
cpu-pinning = ["aquatic_common/cpu-pinning"] cpu-pinning = ["aquatic_common/cpu-pinning"]
with-glommio = ["cpu-pinning", "glommio", "futures-lite"]
with-mio = ["crossbeam-channel", "histogram", "mio", "socket2"]
[dependencies] [dependencies]
anyhow = "1" anyhow = "1"
@ -26,24 +23,18 @@ aquatic_cli_helpers = "0.1.0"
aquatic_common = "0.1.0" aquatic_common = "0.1.0"
aquatic_udp_protocol = "0.1.0" aquatic_udp_protocol = "0.1.0"
cfg-if = "1" cfg-if = "1"
crossbeam-channel = "0.5"
hex = "0.4" hex = "0.4"
log = "0.4" log = "0.4"
mimalloc = { version = "0.1", default-features = false } mimalloc = { version = "0.1", default-features = false }
mio = { version = "0.8", features = ["net", "os-poll"] }
parking_lot = "0.11" parking_lot = "0.11"
rand = { version = "0.8", features = ["small_rng"] } rand = { version = "0.8", features = ["small_rng"] }
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
slab = "0.4"
signal-hook = { version = "0.3" } signal-hook = { version = "0.3" }
socket2 = { version = "0.4", features = ["all"] }
# mio
crossbeam-channel = { version = "0.5", optional = true }
histogram = { version = "0.6", optional = true }
mio = { version = "0.7", features = ["udp", "os-poll", "os-util"], optional = true }
socket2 = { version = "0.4.1", features = ["all"], optional = true }
# glommio
glommio = { git = "https://github.com/DataDog/glommio.git", rev = "4e6b14772da2f4325271fbcf12d24cf91ed466e5", optional = true }
futures-lite = { version = "1", optional = true }
[dev-dependencies] [dev-dependencies]
quickcheck = "1.0" quickcheck = "1"
quickcheck_macros = "1.0" quickcheck_macros = "1"

View file

@ -1,19 +1,19 @@
use std::collections::BTreeMap;
use std::hash::Hash; use std::hash::Hash;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
use std::sync::atomic::AtomicUsize;
use std::sync::Arc; use std::sync::Arc;
use std::time::Instant; use std::time::Instant;
use crossbeam_channel::Sender;
use aquatic_common::access_list::{create_access_list_cache, AccessListArcSwap}; use aquatic_common::access_list::{create_access_list_cache, AccessListArcSwap};
use aquatic_common::AHashIndexMap; use aquatic_common::AHashIndexMap;
use aquatic_common::ValidUntil;
pub use aquatic_common::{access_list::AccessList, ValidUntil}; use aquatic_udp_protocol::*;
pub use aquatic_udp_protocol::*;
use crate::config::Config; use crate::config::Config;
pub mod handlers;
pub mod network;
pub const MAX_PACKET_SIZE: usize = 8192; pub const MAX_PACKET_SIZE: usize = 8192;
pub trait Ip: Hash + PartialEq + Eq + Clone + Copy { pub trait Ip: Hash + PartialEq + Eq + Clone + Copy {
@ -32,6 +32,89 @@ impl Ip for Ipv6Addr {
} }
} }
#[derive(Debug)]
pub struct PendingScrapeRequest {
pub transaction_id: TransactionId,
pub info_hashes: BTreeMap<usize, InfoHash>,
}
#[derive(Debug)]
pub struct PendingScrapeResponse {
pub transaction_id: TransactionId,
pub torrent_stats: BTreeMap<usize, TorrentScrapeStatistics>,
}
#[derive(Debug)]
pub enum ConnectedRequest {
Announce(AnnounceRequest),
Scrape(PendingScrapeRequest),
}
#[derive(Debug)]
pub enum ConnectedResponse {
AnnounceIpv4(AnnounceResponseIpv4),
AnnounceIpv6(AnnounceResponseIpv6),
Scrape(PendingScrapeResponse),
}
#[derive(Clone, Copy, Debug)]
pub struct SocketWorkerIndex(pub usize);
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct RequestWorkerIndex(pub usize);
impl RequestWorkerIndex {
pub fn from_info_hash(config: &Config, info_hash: InfoHash) -> Self {
Self(info_hash.0[0] as usize % config.request_workers)
}
}
pub struct ConnectedRequestSender {
index: SocketWorkerIndex,
senders: Vec<Sender<(SocketWorkerIndex, ConnectedRequest, SocketAddr)>>,
}
impl ConnectedRequestSender {
pub fn new(
index: SocketWorkerIndex,
senders: Vec<Sender<(SocketWorkerIndex, ConnectedRequest, SocketAddr)>>,
) -> Self {
Self { index, senders }
}
pub fn try_send_to(
&self,
index: RequestWorkerIndex,
request: ConnectedRequest,
addr: SocketAddr,
) {
if let Err(err) = self.senders[index.0].try_send((self.index, request, addr)) {
::log::warn!("request_sender.try_send failed: {:?}", err)
}
}
}
pub struct ConnectedResponseSender {
senders: Vec<Sender<(ConnectedResponse, SocketAddr)>>,
}
impl ConnectedResponseSender {
pub fn new(senders: Vec<Sender<(ConnectedResponse, SocketAddr)>>) -> Self {
Self { senders }
}
pub fn try_send_to(
&self,
index: SocketWorkerIndex,
response: ConnectedResponse,
addr: SocketAddr,
) {
if let Err(err) = self.senders[index.0].try_send((response, addr)) {
::log::warn!("request_sender.try_send failed: {:?}", err)
}
}
}
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub enum PeerStatus { pub enum PeerStatus {
Seeding, Seeding,
@ -63,23 +146,7 @@ pub struct Peer<I: Ip> {
pub valid_until: ValidUntil, pub valid_until: ValidUntil,
} }
impl<I: Ip> Peer<I> { pub type PeerMap<I> = AHashIndexMap<PeerId, Peer<I>>;
#[inline(always)]
pub fn to_response_peer(&self) -> ResponsePeer {
ResponsePeer {
ip_address: self.ip_address.ip_addr(),
port: self.port,
}
}
}
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub struct PeerMapKey<I: Ip> {
pub ip: I,
pub peer_id: PeerId,
}
pub type PeerMap<I> = AHashIndexMap<PeerMapKey<I>, Peer<I>>;
pub struct TorrentData<I: Ip> { pub struct TorrentData<I: Ip> {
pub peers: PeerMap<I>, pub peers: PeerMap<I>,
@ -160,9 +227,56 @@ impl TorrentMaps {
} }
} }
pub struct Statistics {
pub requests_received: AtomicUsize,
pub responses_sent: AtomicUsize,
pub bytes_received: AtomicUsize,
pub bytes_sent: AtomicUsize,
pub torrents_ipv4: Vec<AtomicUsize>,
pub torrents_ipv6: Vec<AtomicUsize>,
pub peers_ipv4: Vec<AtomicUsize>,
pub peers_ipv6: Vec<AtomicUsize>,
}
impl Statistics {
pub fn new(num_request_workers: usize) -> Self {
Self {
requests_received: Default::default(),
responses_sent: Default::default(),
bytes_received: Default::default(),
bytes_sent: Default::default(),
torrents_ipv4: Self::create_atomic_usize_vec(num_request_workers),
torrents_ipv6: Self::create_atomic_usize_vec(num_request_workers),
peers_ipv4: Self::create_atomic_usize_vec(num_request_workers),
peers_ipv6: Self::create_atomic_usize_vec(num_request_workers),
}
}
fn create_atomic_usize_vec(len: usize) -> Vec<AtomicUsize> {
::std::iter::repeat_with(|| AtomicUsize::default())
.take(len)
.collect()
}
}
#[derive(Clone)]
pub struct State {
pub access_list: Arc<AccessListArcSwap>,
pub statistics: Arc<Statistics>,
}
impl State {
pub fn new(num_request_workers: usize) -> Self {
Self {
access_list: Arc::new(AccessListArcSwap::default()),
statistics: Arc::new(Statistics::new(num_request_workers)),
}
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::net::{IpAddr, Ipv6Addr}; use std::net::Ipv6Addr;
use crate::{common::MAX_PACKET_SIZE, config::Config}; use crate::{common::MAX_PACKET_SIZE, config::Config};
@ -195,14 +309,14 @@ mod tests {
let config = Config::default(); let config = Config::default();
let peers = ::std::iter::repeat(ResponsePeer { let peers = ::std::iter::repeat(ResponsePeerIpv6 {
ip_address: IpAddr::V6(Ipv6Addr::new(1, 1, 1, 1, 1, 1, 1, 1)), ip_address: Ipv6Addr::new(1, 1, 1, 1, 1, 1, 1, 1),
port: Port(1), port: Port(1),
}) })
.take(config.protocol.max_response_peers) .take(config.protocol.max_response_peers)
.collect(); .collect();
let response = Response::Announce(AnnounceResponse { let response = Response::AnnounceIpv6(AnnounceResponseIpv6 {
transaction_id: TransactionId(1), transaction_id: TransactionId(1),
announce_interval: AnnounceInterval(1), announce_interval: AnnounceInterval(1),
seeders: NumberOfPeers(1), seeders: NumberOfPeers(1),
@ -212,7 +326,7 @@ mod tests {
let mut buf = Vec::new(); let mut buf = Vec::new();
response.write(&mut buf, IpVersion::IPv6).unwrap(); response.write(&mut buf).unwrap();
println!("Buffer len: {}", buf.len()); println!("Buffer len: {}", buf.len());

View file

@ -1,288 +0,0 @@
use std::net::SocketAddr;
use rand::rngs::SmallRng;
use aquatic_common::convert_ipv4_mapped_ipv6;
use aquatic_common::extract_response_peers;
use crate::common::*;
#[derive(Debug)]
pub enum ConnectedRequest {
Announce(AnnounceRequest),
Scrape {
request: ScrapeRequest,
/// Currently only used by glommio implementation
original_indices: Vec<usize>,
},
}
#[derive(Debug)]
pub enum ConnectedResponse {
Announce(AnnounceResponse),
Scrape {
response: ScrapeResponse,
/// Currently only used by glommio implementation
original_indices: Vec<usize>,
},
}
impl Into<Response> for ConnectedResponse {
fn into(self) -> Response {
match self {
Self::Announce(response) => Response::Announce(response),
Self::Scrape { response, .. } => Response::Scrape(response),
}
}
}
pub fn handle_announce_request(
config: &Config,
rng: &mut SmallRng,
torrents: &mut TorrentMaps,
request: AnnounceRequest,
src: SocketAddr,
peer_valid_until: ValidUntil,
) -> AnnounceResponse {
match convert_ipv4_mapped_ipv6(src.ip()) {
IpAddr::V4(ip) => handle_announce_request_inner(
config,
rng,
&mut torrents.ipv4,
request,
ip,
peer_valid_until,
),
IpAddr::V6(ip) => handle_announce_request_inner(
config,
rng,
&mut torrents.ipv6,
request,
ip,
peer_valid_until,
),
}
}
fn handle_announce_request_inner<I: Ip>(
config: &Config,
rng: &mut SmallRng,
torrents: &mut TorrentMap<I>,
request: AnnounceRequest,
peer_ip: I,
peer_valid_until: ValidUntil,
) -> AnnounceResponse {
let peer_key = PeerMapKey {
ip: peer_ip,
peer_id: request.peer_id,
};
let peer_status = PeerStatus::from_event_and_bytes_left(request.event, request.bytes_left);
let peer = Peer {
ip_address: peer_ip,
port: request.port,
status: peer_status,
valid_until: peer_valid_until,
};
let torrent_data = torrents.entry(request.info_hash).or_default();
let opt_removed_peer = match peer_status {
PeerStatus::Leeching => {
torrent_data.num_leechers += 1;
torrent_data.peers.insert(peer_key, peer)
}
PeerStatus::Seeding => {
torrent_data.num_seeders += 1;
torrent_data.peers.insert(peer_key, peer)
}
PeerStatus::Stopped => torrent_data.peers.remove(&peer_key),
};
match opt_removed_peer.map(|peer| peer.status) {
Some(PeerStatus::Leeching) => {
torrent_data.num_leechers -= 1;
}
Some(PeerStatus::Seeding) => {
torrent_data.num_seeders -= 1;
}
_ => {}
}
let max_num_peers_to_take = calc_max_num_peers_to_take(config, request.peers_wanted.0);
let response_peers = extract_response_peers(
rng,
&torrent_data.peers,
max_num_peers_to_take,
peer_key,
Peer::to_response_peer,
);
AnnounceResponse {
transaction_id: request.transaction_id,
announce_interval: AnnounceInterval(config.protocol.peer_announce_interval),
leechers: NumberOfPeers(torrent_data.num_leechers as i32),
seeders: NumberOfPeers(torrent_data.num_seeders as i32),
peers: response_peers,
}
}
#[inline]
fn calc_max_num_peers_to_take(config: &Config, peers_wanted: i32) -> usize {
if peers_wanted <= 0 {
config.protocol.max_response_peers as usize
} else {
::std::cmp::min(
config.protocol.max_response_peers as usize,
peers_wanted as usize,
)
}
}
#[inline]
pub fn handle_scrape_request(
torrents: &mut TorrentMaps,
src: SocketAddr,
request: ScrapeRequest,
) -> ScrapeResponse {
const EMPTY_STATS: TorrentScrapeStatistics = create_torrent_scrape_statistics(0, 0);
let mut stats: Vec<TorrentScrapeStatistics> = Vec::with_capacity(request.info_hashes.len());
let peer_ip = convert_ipv4_mapped_ipv6(src.ip());
if peer_ip.is_ipv4() {
for info_hash in request.info_hashes.iter() {
if let Some(torrent_data) = torrents.ipv4.get(info_hash) {
stats.push(create_torrent_scrape_statistics(
torrent_data.num_seeders as i32,
torrent_data.num_leechers as i32,
));
} else {
stats.push(EMPTY_STATS);
}
}
} else {
for info_hash in request.info_hashes.iter() {
if let Some(torrent_data) = torrents.ipv6.get(info_hash) {
stats.push(create_torrent_scrape_statistics(
torrent_data.num_seeders as i32,
torrent_data.num_leechers as i32,
));
} else {
stats.push(EMPTY_STATS);
}
}
}
ScrapeResponse {
transaction_id: request.transaction_id,
torrent_stats: stats,
}
}
#[inline(always)]
const fn create_torrent_scrape_statistics(seeders: i32, leechers: i32) -> TorrentScrapeStatistics {
TorrentScrapeStatistics {
seeders: NumberOfPeers(seeders),
completed: NumberOfDownloads(0), // No implementation planned
leechers: NumberOfPeers(leechers),
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::net::Ipv4Addr;
use quickcheck::{quickcheck, TestResult};
use rand::thread_rng;
use super::*;
fn gen_peer_map_key_and_value(i: u32) -> (PeerMapKey<Ipv4Addr>, Peer<Ipv4Addr>) {
let ip_address = Ipv4Addr::from(i.to_be_bytes());
let peer_id = PeerId([0; 20]);
let key = PeerMapKey {
ip: ip_address,
peer_id,
};
let value = Peer {
ip_address,
port: Port(1),
status: PeerStatus::Leeching,
valid_until: ValidUntil::new(0),
};
(key, value)
}
#[test]
fn test_extract_response_peers() {
fn prop(data: (u16, u16)) -> TestResult {
let gen_num_peers = data.0 as u32;
let req_num_peers = data.1 as usize;
let mut peer_map: PeerMap<Ipv4Addr> = Default::default();
let mut opt_sender_key = None;
let mut opt_sender_peer = None;
for i in 0..gen_num_peers {
let (key, value) = gen_peer_map_key_and_value((i << 16) + i);
if i == 0 {
opt_sender_key = Some(key);
opt_sender_peer = Some(value.to_response_peer());
}
peer_map.insert(key, value);
}
let mut rng = thread_rng();
let peers = extract_response_peers(
&mut rng,
&peer_map,
req_num_peers,
opt_sender_key.unwrap_or_else(|| gen_peer_map_key_and_value(1).0),
Peer::to_response_peer,
);
// Check that number of returned peers is correct
let mut success = peers.len() <= req_num_peers;
if req_num_peers >= gen_num_peers as usize {
success &= peers.len() == gen_num_peers as usize
|| peers.len() + 1 == gen_num_peers as usize;
}
// Check that returned peers are unique (no overlap) and that sender
// isn't returned
let mut ip_addresses = HashSet::with_capacity(peers.len());
for peer in peers {
if peer == opt_sender_peer.clone().unwrap()
|| ip_addresses.contains(&peer.ip_address)
{
success = false;
break;
}
ip_addresses.insert(peer.ip_address);
}
TestResult::from_bool(success)
}
quickcheck(prop as fn((u16, u16)) -> TestResult);
}
}

View file

@ -1,30 +0,0 @@
use std::{net::SocketAddr, time::Instant};
use aquatic_common::AHashIndexMap;
pub use aquatic_common::{access_list::AccessList, ValidUntil};
pub use aquatic_udp_protocol::*;
#[derive(Default)]
pub struct ConnectionMap(AHashIndexMap<(ConnectionId, SocketAddr), ValidUntil>);
impl ConnectionMap {
pub fn insert(
&mut self,
connection_id: ConnectionId,
socket_addr: SocketAddr,
valid_until: ValidUntil,
) {
self.0.insert((connection_id, socket_addr), valid_until);
}
pub fn contains(&self, connection_id: ConnectionId, socket_addr: SocketAddr) -> bool {
self.0.contains_key(&(connection_id, socket_addr))
}
pub fn clean(&mut self) {
let now = Instant::now();
self.0.retain(|_, v| v.0 > now);
self.0.shrink_to_fit();
}
}

View file

@ -18,9 +18,7 @@ pub struct Config {
pub log_level: LogLevel, pub log_level: LogLevel,
pub network: NetworkConfig, pub network: NetworkConfig,
pub protocol: ProtocolConfig, pub protocol: ProtocolConfig,
#[cfg(feature = "with-mio")]
pub handlers: HandlerConfig, pub handlers: HandlerConfig,
#[cfg(feature = "with-mio")]
pub statistics: StatisticsConfig, pub statistics: StatisticsConfig,
pub cleaning: CleaningConfig, pub cleaning: CleaningConfig,
pub privileges: PrivilegeConfig, pub privileges: PrivilegeConfig,
@ -29,6 +27,25 @@ pub struct Config {
pub cpu_pinning: aquatic_common::cpu_pinning::CpuPinningConfig, pub cpu_pinning: aquatic_common::cpu_pinning::CpuPinningConfig,
} }
impl Default for Config {
fn default() -> Self {
Self {
socket_workers: 1,
request_workers: 1,
log_level: LogLevel::Error,
network: NetworkConfig::default(),
protocol: ProtocolConfig::default(),
handlers: HandlerConfig::default(),
statistics: StatisticsConfig::default(),
cleaning: CleaningConfig::default(),
privileges: PrivilegeConfig::default(),
access_list: AccessListConfig::default(),
#[cfg(feature = "cpu-pinning")]
cpu_pinning: Default::default(),
}
}
}
impl aquatic_cli_helpers::Config for Config { impl aquatic_cli_helpers::Config for Config {
fn get_log_level(&self) -> Option<LogLevel> { fn get_log_level(&self) -> Option<LogLevel> {
Some(self.log_level) Some(self.log_level)
@ -40,6 +57,7 @@ impl aquatic_cli_helpers::Config for Config {
pub struct NetworkConfig { pub struct NetworkConfig {
/// Bind to this address /// Bind to this address
pub address: SocketAddr, pub address: SocketAddr,
pub only_ipv6: bool,
/// Size of socket recv buffer. Use 0 for OS default. /// Size of socket recv buffer. Use 0 for OS default.
/// ///
/// This setting can have a big impact on dropped packages. It might /// This setting can have a big impact on dropped packages. It might
@ -55,8 +73,20 @@ pub struct NetworkConfig {
/// $ sudo sysctl -w net.core.rmem_max=104857600 /// $ sudo sysctl -w net.core.rmem_max=104857600
/// $ sudo sysctl -w net.core.rmem_default=104857600 /// $ sudo sysctl -w net.core.rmem_default=104857600
pub socket_recv_buffer_size: usize, pub socket_recv_buffer_size: usize,
#[cfg(feature = "with-mio")]
pub poll_event_capacity: usize, pub poll_event_capacity: usize,
pub poll_timeout_ms: u64,
}
impl Default for NetworkConfig {
fn default() -> Self {
Self {
address: SocketAddr::from(([0, 0, 0, 0], 3000)),
only_ipv6: false,
socket_recv_buffer_size: 4096 * 128,
poll_event_capacity: 4096,
poll_timeout_ms: 50,
}
}
} }
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
@ -70,69 +100,6 @@ pub struct ProtocolConfig {
pub peer_announce_interval: i32, pub peer_announce_interval: i32,
} }
#[cfg(feature = "with-mio")]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct HandlerConfig {
/// Maximum number of requests to receive from channel before locking
/// mutex and starting work
pub max_requests_per_iter: usize,
pub channel_recv_timeout_microseconds: u64,
}
#[cfg(feature = "with-mio")]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct StatisticsConfig {
/// Print statistics this often (seconds). Don't print when set to zero.
pub interval: u64,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct CleaningConfig {
/// Clean connections this often (seconds)
pub connection_cleaning_interval: u64,
/// Clean torrents this often (seconds)
pub torrent_cleaning_interval: u64,
/// Remove connections that are older than this (seconds)
pub max_connection_age: u64,
/// Remove peers that haven't announced for this long (seconds)
pub max_peer_age: u64,
}
impl Default for Config {
fn default() -> Self {
Self {
socket_workers: 1,
request_workers: 1,
log_level: LogLevel::Error,
network: NetworkConfig::default(),
protocol: ProtocolConfig::default(),
#[cfg(feature = "with-mio")]
handlers: HandlerConfig::default(),
#[cfg(feature = "with-mio")]
statistics: StatisticsConfig::default(),
cleaning: CleaningConfig::default(),
privileges: PrivilegeConfig::default(),
access_list: AccessListConfig::default(),
#[cfg(feature = "cpu-pinning")]
cpu_pinning: Default::default(),
}
}
}
impl Default for NetworkConfig {
fn default() -> Self {
Self {
address: SocketAddr::from(([0, 0, 0, 0], 3000)),
socket_recv_buffer_size: 4096 * 128,
#[cfg(feature = "with-mio")]
poll_event_capacity: 4096,
}
}
}
impl Default for ProtocolConfig { impl Default for ProtocolConfig {
fn default() -> Self { fn default() -> Self {
Self { Self {
@ -143,30 +110,64 @@ impl Default for ProtocolConfig {
} }
} }
#[cfg(feature = "with-mio")] #[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct HandlerConfig {
pub channel_recv_timeout_ms: u64,
}
impl Default for HandlerConfig { impl Default for HandlerConfig {
fn default() -> Self { fn default() -> Self {
Self { Self {
max_requests_per_iter: 10000, channel_recv_timeout_ms: 100,
channel_recv_timeout_microseconds: 200,
} }
} }
} }
#[cfg(feature = "with-mio")] #[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct StatisticsConfig {
/// Print statistics this often (seconds). Don't print when set to zero.
pub interval: u64,
}
impl Default for StatisticsConfig { impl Default for StatisticsConfig {
fn default() -> Self { fn default() -> Self {
Self { interval: 0 } Self { interval: 0 }
} }
} }
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct CleaningConfig {
/// Clean connections this often (seconds)
pub connection_cleaning_interval: u64,
/// Clean torrents this often (seconds)
pub torrent_cleaning_interval: u64,
/// Clean pending scrape responses this often (seconds)
///
/// In regular operation, there should be no pending scrape responses
/// lingering for a long time. However, the cleaning also returns unused
/// allocated memory to the OS, so the interval can be configured here.
pub pending_scrape_cleaning_interval: u64,
/// Remove connections that are older than this (seconds)
pub max_connection_age: u64,
/// Remove peers that haven't announced for this long (seconds)
pub max_peer_age: u64,
/// Remove pending scrape responses that haven't been returned from request
/// workers for this long (seconds)
pub max_pending_scrape_age: u64,
}
impl Default for CleaningConfig { impl Default for CleaningConfig {
fn default() -> Self { fn default() -> Self {
Self { Self {
connection_cleaning_interval: 60, connection_cleaning_interval: 60,
torrent_cleaning_interval: 60 * 2, torrent_cleaning_interval: 60 * 2,
pending_scrape_cleaning_interval: 60 * 10,
max_connection_age: 60 * 5, max_connection_age: 60 * 5,
max_peer_age: 60 * 20, max_peer_age: 60 * 20,
max_pending_scrape_age: 60,
} }
} }
} }

View file

@ -1,8 +0,0 @@
use std::sync::Arc;
use aquatic_common::access_list::AccessListArcSwap;
#[derive(Default, Clone)]
pub struct State {
pub access_list: Arc<AccessListArcSwap>,
}

View file

@ -1,117 +0,0 @@
use std::cell::RefCell;
use std::net::SocketAddr;
use std::rc::Rc;
use std::time::Duration;
use futures_lite::{Stream, StreamExt};
use glommio::channels::channel_mesh::{MeshBuilder, Partial, Role, Senders};
use glommio::timer::TimerActionRepeat;
use glommio::{enclose, prelude::*};
use rand::prelude::SmallRng;
use rand::SeedableRng;
use crate::common::handlers::handle_announce_request;
use crate::common::handlers::*;
use crate::common::*;
use crate::config::Config;
use super::common::State;
pub async fn run_request_worker(
config: Config,
state: State,
request_mesh_builder: MeshBuilder<(usize, ConnectedRequest, SocketAddr), Partial>,
response_mesh_builder: MeshBuilder<(ConnectedResponse, SocketAddr), Partial>,
) {
let (_, mut request_receivers) = request_mesh_builder.join(Role::Consumer).await.unwrap();
let (response_senders, _) = response_mesh_builder.join(Role::Producer).await.unwrap();
let response_senders = Rc::new(response_senders);
let torrents = Rc::new(RefCell::new(TorrentMaps::default()));
// Periodically clean torrents
TimerActionRepeat::repeat(enclose!((config, torrents, state) move || {
enclose!((config, torrents, state) move || async move {
torrents.borrow_mut().clean(&config, &state.access_list);
Some(Duration::from_secs(config.cleaning.torrent_cleaning_interval))
})()
}));
let mut handles = Vec::new();
for (_, receiver) in request_receivers.streams() {
let handle = spawn_local(handle_request_stream(
config.clone(),
torrents.clone(),
response_senders.clone(),
receiver,
))
.detach();
handles.push(handle);
}
for handle in handles {
handle.await;
}
}
async fn handle_request_stream<S>(
config: Config,
torrents: Rc<RefCell<TorrentMaps>>,
response_senders: Rc<Senders<(ConnectedResponse, SocketAddr)>>,
mut stream: S,
) where
S: Stream<Item = (usize, ConnectedRequest, SocketAddr)> + ::std::marker::Unpin,
{
let mut rng = SmallRng::from_entropy();
let max_peer_age = config.cleaning.max_peer_age;
let peer_valid_until = Rc::new(RefCell::new(ValidUntil::new(max_peer_age)));
TimerActionRepeat::repeat(enclose!((peer_valid_until) move || {
enclose!((peer_valid_until) move || async move {
*peer_valid_until.borrow_mut() = ValidUntil::new(max_peer_age);
Some(Duration::from_secs(1))
})()
}));
while let Some((producer_index, request, src)) = stream.next().await {
let response = match request {
ConnectedRequest::Announce(request) => {
ConnectedResponse::Announce(handle_announce_request(
&config,
&mut rng,
&mut torrents.borrow_mut(),
request,
src,
peer_valid_until.borrow().to_owned(),
))
}
ConnectedRequest::Scrape {
request,
original_indices,
} => {
let response = handle_scrape_request(&mut torrents.borrow_mut(), src, request);
ConnectedResponse::Scrape {
response,
original_indices,
}
}
};
::log::debug!("preparing to send response to channel: {:?}", response);
if let Err(err) = response_senders
.send_to(producer_index, (response, src))
.await
{
::log::error!("response_sender.send: {:?}", err);
}
yield_if_needed().await;
}
}

View file

@ -1,135 +0,0 @@
use std::sync::{atomic::AtomicUsize, Arc};
use aquatic_common::access_list::update_access_list;
use aquatic_common::cpu_pinning::{pin_current_if_configured_to, WorkerIndex};
use aquatic_common::privileges::drop_privileges_after_socket_binding;
use glommio::channels::channel_mesh::MeshBuilder;
use glommio::prelude::*;
use signal_hook::consts::SIGUSR1;
use signal_hook::iterator::Signals;
use crate::config::Config;
use self::common::State;
mod common;
pub mod handlers;
pub mod network;
pub const SHARED_CHANNEL_SIZE: usize = 4096;
pub fn run(config: Config) -> ::anyhow::Result<()> {
let state = State::default();
update_access_list(&config.access_list, &state.access_list)?;
let mut signals = Signals::new(::std::iter::once(SIGUSR1))?;
{
let config = config.clone();
let state = state.clone();
::std::thread::spawn(move || run_inner(config, state));
}
pin_current_if_configured_to(
&config.cpu_pinning,
config.socket_workers,
WorkerIndex::Other,
);
for signal in &mut signals {
match signal {
SIGUSR1 => {
let _ = update_access_list(&config.access_list, &state.access_list);
}
_ => unreachable!(),
}
}
Ok(())
}
/// Spawn one glommio executor per socket worker and per request worker and
/// connect them through two channel meshes: `request_mesh_builder` carries
/// requests from socket workers to request workers, `response_mesh_builder`
/// carries responses back. Blocks until every executor has exited.
pub fn run_inner(config: Config, state: State) -> anyhow::Result<()> {
// Every worker joins both meshes, so the mesh size is the total count.
let num_peers = config.socket_workers + config.request_workers;
let request_mesh_builder = MeshBuilder::partial(num_peers, SHARED_CHANNEL_SIZE);
let response_mesh_builder = MeshBuilder::partial(num_peers, SHARED_CHANNEL_SIZE);
// Incremented by each socket worker once its UDP socket is bound; read
// below to decide when it is safe to drop elevated privileges.
let num_bound_sockets = Arc::new(AtomicUsize::new(0));
let mut executors = Vec::new();
for i in 0..(config.socket_workers) {
let config = config.clone();
let state = state.clone();
let request_mesh_builder = request_mesh_builder.clone();
let response_mesh_builder = response_mesh_builder.clone();
let num_bound_sockets = num_bound_sockets.clone();
let builder = LocalExecutorBuilder::default().name("socket");
let executor = builder.spawn(move || async move {
pin_current_if_configured_to(
&config.cpu_pinning,
config.socket_workers,
WorkerIndex::SocketWorker(i),
);
network::run_socket_worker(
config,
state,
request_mesh_builder,
response_mesh_builder,
num_bound_sockets,
)
.await
});
executors.push(executor);
}
for i in 0..(config.request_workers) {
let config = config.clone();
let state = state.clone();
let request_mesh_builder = request_mesh_builder.clone();
let response_mesh_builder = response_mesh_builder.clone();
let builder = LocalExecutorBuilder::default().name("request");
let executor = builder.spawn(move || async move {
pin_current_if_configured_to(
&config.cpu_pinning,
config.socket_workers,
WorkerIndex::RequestWorker(i),
);
handlers::run_request_worker(config, state, request_mesh_builder, response_mesh_builder)
.await
});
executors.push(executor);
}
// Waits until all sockets are bound before dropping privileges.
drop_privileges_after_socket_binding(
&config.privileges,
num_bound_sockets,
config.socket_workers,
)
.unwrap();
pin_current_if_configured_to(
&config.cpu_pinning,
config.socket_workers,
WorkerIndex::Other,
);
for executor in executors {
executor
.expect("failed to spawn local executor")
.join()
.unwrap();
}
Ok(())
}

View file

@ -1,428 +0,0 @@
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::io::Cursor;
use std::net::{IpAddr, SocketAddr};
use std::rc::Rc;
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use std::time::{Duration, Instant};
use aquatic_common::access_list::create_access_list_cache;
use aquatic_common::AHashIndexMap;
use futures_lite::{Stream, StreamExt};
use glommio::channels::channel_mesh::{MeshBuilder, Partial, Role, Senders};
use glommio::channels::local_channel::{new_unbounded, LocalSender};
use glommio::enclose;
use glommio::net::UdpSocket;
use glommio::prelude::*;
use glommio::timer::TimerActionRepeat;
use rand::prelude::{Rng, SeedableRng, StdRng};
use aquatic_udp_protocol::{IpVersion, Request, Response};
use super::common::State;
use crate::common::handlers::*;
use crate::common::network::ConnectionMap;
use crate::common::*;
use crate::config::Config;
const PENDING_SCRAPE_MAX_WAIT: u64 = 30;
/// Partial state of a scrape response whose torrent stats are still being
/// collected from multiple request workers.
struct PendingScrapeResponse {
// How many worker responses are still outstanding; 0 means complete.
pending_worker_responses: usize,
// Entries older than this are discarded by `clean()`.
valid_until: ValidUntil,
// Stats keyed by the torrent's index in the client's original request,
// so the final response preserves the client's ordering.
stats: BTreeMap<usize, TorrentScrapeStatistics>,
}
/// All in-flight scrape responses, keyed by transaction id.
#[derive(Default)]
struct PendingScrapeResponses(AHashIndexMap<TransactionId, PendingScrapeResponse>);
impl PendingScrapeResponses {
/// Register a scrape that was split across `pending_worker_responses`
/// request workers; must be called before the partial responses arrive.
fn prepare(
&mut self,
transaction_id: TransactionId,
pending_worker_responses: usize,
valid_until: ValidUntil,
) {
let pending = PendingScrapeResponse {
pending_worker_responses,
valid_until,
stats: BTreeMap::new(),
};
self.0.insert(transaction_id, pending);
}
/// Merge one worker's partial response. Returns the assembled response
/// once the last outstanding partial has arrived, `None` otherwise.
/// `original_indices` maps each stat back to its position in the
/// client's request.
fn add_and_get_finished(
&mut self,
mut response: ScrapeResponse,
mut original_indices: Vec<usize>,
) -> Option<ScrapeResponse> {
let finished = if let Some(r) = self.0.get_mut(&response.transaction_id) {
r.pending_worker_responses -= 1;
r.stats.extend(
original_indices
.drain(..)
.zip(response.torrent_stats.drain(..)),
);
r.pending_worker_responses == 0
} else {
// Can happen if the entry expired (see `clean`) before all
// worker responses arrived; the response is then dropped.
::log::warn!("PendingScrapeResponses.add didn't find PendingScrapeResponse in map");
false
};
if finished {
let PendingScrapeResponse { stats, .. } =
self.0.remove(&response.transaction_id).unwrap();
Some(ScrapeResponse {
transaction_id: response.transaction_id,
// BTreeMap iteration restores the client's original order.
torrent_stats: stats.into_values().collect(),
})
} else {
None
}
}
/// Drop entries whose deadline has passed and release excess capacity.
fn clean(&mut self) {
let now = Instant::now();
self.0.retain(|_, v| v.valid_until.0 > now);
self.0.shrink_to_fit();
}
}
/// Glommio socket worker: bind the UDP socket, join both channel meshes,
/// then spawn the request-reading task and one response-handling task per
/// request worker. The current task finishes by draining locally generated
/// responses (connect replies, errors) back onto the socket.
pub async fn run_socket_worker(
config: Config,
state: State,
request_mesh_builder: MeshBuilder<(usize, ConnectedRequest, SocketAddr), Partial>,
response_mesh_builder: MeshBuilder<(ConnectedResponse, SocketAddr), Partial>,
num_bound_sockets: Arc<AtomicUsize>,
) {
// Channel for responses produced on this worker itself (no round trip
// through a request worker).
let (local_sender, local_receiver) = new_unbounded();
let mut socket = UdpSocket::bind(config.network.address).unwrap();
let recv_buffer_size = config.network.socket_recv_buffer_size;
// 0 means "keep the OS default".
if recv_buffer_size != 0 {
socket.set_buffer_size(recv_buffer_size);
}
let socket = Rc::new(socket);
// Signal the privilege-dropping logic that this socket is bound.
num_bound_sockets.fetch_add(1, Ordering::SeqCst);
let (request_senders, _) = request_mesh_builder.join(Role::Producer).await.unwrap();
let (_, mut response_receivers) = response_mesh_builder.join(Role::Consumer).await.unwrap();
let response_consumer_index = response_receivers.consumer_id().unwrap();
let pending_scrape_responses = Rc::new(RefCell::new(PendingScrapeResponses::default()));
// Periodically clean pending_scrape_responses
TimerActionRepeat::repeat(enclose!((pending_scrape_responses) move || {
enclose!((pending_scrape_responses) move || async move {
pending_scrape_responses.borrow_mut().clean();
Some(Duration::from_secs(120))
})()
}));
spawn_local(enclose!((pending_scrape_responses) read_requests(
config.clone(),
state,
request_senders,
response_consumer_index,
local_sender,
socket.clone(),
pending_scrape_responses,
)))
.detach();
// One task per request-worker stream feeding responses back to clients.
for (_, receiver) in response_receivers.streams().into_iter() {
spawn_local(enclose!((pending_scrape_responses) handle_shared_responses(
socket.clone(),
pending_scrape_responses,
receiver,
)))
.detach();
}
send_local_responses(socket, local_receiver.stream()).await;
}
/// Receive loop for one socket worker: parse each UDP datagram, answer
/// connect requests locally, and forward announce/scrape requests to the
/// request worker responsible for the torrent (see
/// `calculate_request_consumer_index`). Scrapes spanning several torrents
/// may be split across workers; the split is registered in
/// `pending_scrape_responses` so the partial replies can be reassembled.
async fn read_requests(
config: Config,
state: State,
request_senders: Senders<(usize, ConnectedRequest, SocketAddr)>,
response_consumer_index: usize,
local_sender: LocalSender<(Response, SocketAddr)>,
socket: Rc<UdpSocket>,
pending_scrape_responses: Rc<RefCell<PendingScrapeResponses>>,
) {
let mut rng = StdRng::from_entropy();
let access_list_mode = config.access_list.mode;
let max_connection_age = config.cleaning.max_connection_age;
// Deadlines are refreshed by background timers (below) instead of
// calling Instant::now() for every incoming packet.
let connection_valid_until = Rc::new(RefCell::new(ValidUntil::new(max_connection_age)));
let pending_scrape_valid_until =
Rc::new(RefCell::new(ValidUntil::new(PENDING_SCRAPE_MAX_WAIT)));
let connections = Rc::new(RefCell::new(ConnectionMap::default()));
let mut access_list_cache = create_access_list_cache(&state.access_list);
// Periodically update connection_valid_until
TimerActionRepeat::repeat(enclose!((connection_valid_until) move || {
enclose!((connection_valid_until) move || async move {
*connection_valid_until.borrow_mut() = ValidUntil::new(max_connection_age);
Some(Duration::from_secs(1))
})()
}));
// Periodically update pending_scrape_valid_until
TimerActionRepeat::repeat(enclose!((pending_scrape_valid_until) move || {
enclose!((pending_scrape_valid_until) move || async move {
*pending_scrape_valid_until.borrow_mut() = ValidUntil::new(PENDING_SCRAPE_MAX_WAIT);
Some(Duration::from_secs(10))
})()
}));
// Periodically clean connections
TimerActionRepeat::repeat(enclose!((config, connections) move || {
enclose!((config, connections) move || async move {
connections.borrow_mut().clean();
Some(Duration::from_secs(config.cleaning.connection_cleaning_interval))
})()
}));
let mut buf = [0u8; MAX_PACKET_SIZE];
loop {
match socket.recv_from(&mut buf).await {
Ok((amt, src)) => {
let request = Request::from_bytes(&buf[..amt], config.protocol.max_scrape_torrents);
::log::debug!("read request: {:?}", request);
match request {
// Connect requests are answered locally: issue a random
// connection id and remember it for this source address.
Ok(Request::Connect(request)) => {
let connection_id = ConnectionId(rng.gen());
connections.borrow_mut().insert(
connection_id,
src,
connection_valid_until.borrow().to_owned(),
);
let response = Response::Connect(ConnectResponse {
connection_id,
transaction_id: request.transaction_id,
});
local_sender.try_send((response, src)).unwrap();
}
// Announces require a previously issued connection id and
// an info hash allowed by the access list; requests failing
// the connection check are silently dropped.
Ok(Request::Announce(request)) => {
if connections.borrow().contains(request.connection_id, src) {
if access_list_cache
.load()
.allows(access_list_mode, &request.info_hash.0)
{
let request_consumer_index =
calculate_request_consumer_index(&config, request.info_hash);
if let Err(err) = request_senders
.send_to(
request_consumer_index,
(
response_consumer_index,
ConnectedRequest::Announce(request),
src,
),
)
.await
{
::log::error!("request_sender.try_send failed: {:?}", err)
}
} else {
let response = Response::Error(ErrorResponse {
transaction_id: request.transaction_id,
message: "Info hash not allowed".into(),
});
local_sender.try_send((response, src)).unwrap();
}
}
}
Ok(Request::Scrape(ScrapeRequest {
transaction_id,
connection_id,
info_hashes,
})) => {
if connections.borrow().contains(connection_id, src) {
// Partition the requested info hashes by responsible
// request worker, remembering each hash's original
// position so replies can be reordered on the way back.
let mut consumer_requests: AHashIndexMap<
usize,
(ScrapeRequest, Vec<usize>),
> = Default::default();
for (i, info_hash) in info_hashes.into_iter().enumerate() {
let (req, indices) = consumer_requests
.entry(calculate_request_consumer_index(&config, info_hash))
.or_insert_with(|| {
let request = ScrapeRequest {
transaction_id: transaction_id,
connection_id: connection_id,
info_hashes: Vec::new(),
};
(request, Vec::new())
});
req.info_hashes.push(info_hash);
indices.push(i);
}
// Must be registered before the partial requests are
// sent, so no reply can arrive for an unknown entry.
pending_scrape_responses.borrow_mut().prepare(
transaction_id,
consumer_requests.len(),
pending_scrape_valid_until.borrow().to_owned(),
);
for (consumer_index, (request, original_indices)) in consumer_requests {
let request = ConnectedRequest::Scrape {
request,
original_indices,
};
if let Err(err) = request_senders
.send_to(
consumer_index,
(response_consumer_index, request, src),
)
.await
{
::log::error!("request_sender.send failed: {:?}", err)
}
}
}
}
Err(err) => {
::log::debug!("Request::from_bytes error: {:?}", err);
// Only report parse errors to clients that hold a valid
// connection id, to avoid reflection/amplification abuse.
if let RequestParseError::Sendable {
connection_id,
transaction_id,
err,
} = err
{
if connections.borrow().contains(connection_id, src) {
let response = ErrorResponse {
transaction_id,
message: err.right_or("Parse error").into(),
};
local_sender.try_send((response.into(), src)).unwrap();
}
}
}
}
}
Err(err) => {
::log::error!("recv_from: {:?}", err);
}
}
// Cooperatively yield so timers and sibling tasks can run.
yield_if_needed().await;
}
}
/// Forward responses arriving from a request worker to clients.
///
/// Announce responses are complete and go straight to the socket. Scrape
/// responses are partial shares of a split request and only produce output
/// once `PendingScrapeResponses` reports the final share has arrived.
async fn handle_shared_responses<S>(
    socket: Rc<UdpSocket>,
    pending_scrape_responses: Rc<RefCell<PendingScrapeResponses>>,
    mut stream: S,
) where
    S: Stream<Item = (ConnectedResponse, SocketAddr)> + ::std::marker::Unpin,
{
    // Single scratch buffer, reused for serializing every outgoing packet.
    let mut storage = [0u8; MAX_PACKET_SIZE];
    let mut cursor = Cursor::new(&mut storage[..]);

    while let Some((response, addr)) = stream.next().await {
        let ready = match response {
            ConnectedResponse::Announce(r) => Some(Response::Announce(r)),
            ConnectedResponse::Scrape {
                response,
                original_indices,
            } => pending_scrape_responses
                .borrow_mut()
                .add_and_get_finished(response, original_indices)
                .map(Response::Scrape),
        };

        if let Some(response) = ready {
            write_response_to_socket(&socket, &mut cursor, addr, response).await;
        }

        // Give timers and sibling tasks a chance to run.
        yield_if_needed().await;
    }
}
/// Drain locally generated responses (connect replies, error replies) from
/// the worker-local channel onto the socket until the sender side closes.
async fn send_local_responses<S>(socket: Rc<UdpSocket>, mut stream: S)
where
    S: Stream<Item = (Response, SocketAddr)> + ::std::marker::Unpin,
{
    // One serialization buffer for the lifetime of the task.
    let mut storage = [0u8; MAX_PACKET_SIZE];
    let mut cursor = Cursor::new(&mut storage[..]);

    while let Some((response, addr)) = stream.next().await {
        write_response_to_socket(&socket, &mut cursor, addr, response).await;

        // Yield cooperatively between packets.
        yield_if_needed().await;
    }
}
/// Serialize `response` into `buf` and send it to `addr`.
///
/// The cursor is rewound first so the same buffer can be reused across
/// calls. Send errors are logged and otherwise ignored (UDP is
/// best-effort); serialization failure panics, since it indicates a
/// protocol bug rather than a transient network condition.
async fn write_response_to_socket(
    socket: &Rc<UdpSocket>,
    buf: &mut Cursor<&mut [u8]>,
    addr: SocketAddr,
    response: Response,
) {
    buf.set_position(0);

    // Fix: format by reference instead of cloning the whole response just
    // to log it (`{:?}` only needs a borrow).
    ::log::debug!("preparing to send response: {:?}", response);

    response
        .write(buf, ip_version_from_ip(addr.ip()))
        .expect("write response");

    let position = buf.position() as usize;

    if let Err(err) = socket.send_to(&buf.get_ref()[..position], addr).await {
        ::log::info!("send_to failed: {:?}", err);
    }
}
/// Map an info hash to a request-worker index so that all requests for the
/// same torrent land on the same worker: first hash byte modulo the number
/// of request workers.
fn calculate_request_consumer_index(config: &Config, info_hash: InfoHash) -> usize {
    usize::from(info_hash.0[0]) % config.request_workers
}
/// Classify an address for the UDP BitTorrent wire format.
///
/// IPv4-mapped IPv6 addresses (`::ffff:a.b.c.d`, i.e. segments
/// `[0, 0, 0, 0, 0, 0xffff, _, _]`) are reported as IPv4 so the shorter
/// v4 peer encoding is used for them.
fn ip_version_from_ip(ip: IpAddr) -> IpVersion {
    match ip {
        IpAddr::V4(_) => IpVersion::IPv4,
        IpAddr::V6(ip) => match ip.segments() {
            [0, 0, 0, 0, 0, 0xffff, _, _] => IpVersion::IPv4,
            _ => IpVersion::IPv6,
        },
    }
}

View file

@ -0,0 +1,405 @@
use std::collections::BTreeMap;
use std::net::IpAddr;
use std::net::Ipv4Addr;
use std::net::Ipv6Addr;
use std::net::SocketAddr;
use std::sync::atomic::Ordering;
use std::time::Duration;
use std::time::Instant;
use aquatic_common::ValidUntil;
use crossbeam_channel::Receiver;
use rand::{rngs::SmallRng, SeedableRng};
use aquatic_common::extract_response_peers;
use aquatic_udp_protocol::*;
use crate::common::*;
use crate::config::Config;
/// A peer as returned in an announce response, generic over the IP
/// address type (IPv4 or IPv6).
#[derive(Clone, PartialEq, Debug)]
pub struct ProtocolResponsePeer<I> {
pub ip_address: I,
pub port: Port,
}
impl<I: Ip> ProtocolResponsePeer<I> {
/// Copy the address/port pair out of a stored `Peer` (status and
/// validity are internal bookkeeping and not sent to clients).
#[inline(always)]
fn from_peer(peer: &Peer<I>) -> Self {
Self {
ip_address: peer.ip_address,
port: peer.port,
}
}
}
/// Announce response built by the handlers, generic over IP family; it is
/// converted into the wire-level `ConnectedResponse` variants below.
pub struct ProtocolAnnounceResponse<I> {
pub transaction_id: TransactionId,
pub announce_interval: AnnounceInterval,
pub leechers: NumberOfPeers,
pub seeders: NumberOfPeers,
pub peers: Vec<ProtocolResponsePeer<I>>,
}
/// Convert an IPv4 announce response into the shared response enum.
///
/// Implemented as `From` rather than `Into` (per `clippy::from_over_into`):
/// the blanket impl still provides `Into`, so existing `.into()` call sites
/// keep working unchanged.
impl From<ProtocolAnnounceResponse<Ipv4Addr>> for ConnectedResponse {
    fn from(response: ProtocolAnnounceResponse<Ipv4Addr>) -> Self {
        ConnectedResponse::AnnounceIpv4(AnnounceResponseIpv4 {
            transaction_id: response.transaction_id,
            announce_interval: response.announce_interval,
            leechers: response.leechers,
            seeders: response.seeders,
            // Repackage each peer into the wire-level v4 peer struct.
            peers: response
                .peers
                .into_iter()
                .map(|peer| ResponsePeerIpv4 {
                    ip_address: peer.ip_address,
                    port: peer.port,
                })
                .collect(),
        })
    }
}
/// Convert an IPv6 announce response into the shared response enum.
///
/// Implemented as `From` rather than `Into` (per `clippy::from_over_into`):
/// the blanket impl still provides `Into`, so existing `.into()` call sites
/// keep working unchanged.
impl From<ProtocolAnnounceResponse<Ipv6Addr>> for ConnectedResponse {
    fn from(response: ProtocolAnnounceResponse<Ipv6Addr>) -> Self {
        ConnectedResponse::AnnounceIpv6(AnnounceResponseIpv6 {
            transaction_id: response.transaction_id,
            announce_interval: response.announce_interval,
            leechers: response.leechers,
            seeders: response.seeders,
            // Repackage each peer into the wire-level v6 peer struct.
            peers: response
                .peers
                .into_iter()
                .map(|peer| ResponsePeerIpv6 {
                    ip_address: peer.ip_address,
                    port: peer.port,
                })
                .collect(),
        })
    }
}
/// Main loop of a request worker: receive announce/scrape requests over
/// the crossbeam channel, handle them against this worker's own torrent
/// maps (state is sharded per worker, no shared lock), and send responses
/// back to the originating socket worker. Housekeeping (cleaning, peer
/// deadline refresh, statistics) runs every 128 iterations.
pub fn run_request_worker(
config: Config,
state: State,
request_receiver: Receiver<(SocketWorkerIndex, ConnectedRequest, SocketAddr)>,
response_sender: ConnectedResponseSender,
worker_index: RequestWorkerIndex,
) {
let mut torrents = TorrentMaps::default();
let mut small_rng = SmallRng::from_entropy();
// Recv timeout bounds how long housekeeping can be delayed when the
// channel is idle.
let timeout = Duration::from_millis(config.handlers.channel_recv_timeout_ms);
let mut peer_valid_until = ValidUntil::new(config.cleaning.max_peer_age);
let cleaning_interval = Duration::from_secs(config.cleaning.torrent_cleaning_interval);
let statistics_update_interval = Duration::from_secs(config.statistics.interval);
let mut last_cleaning = Instant::now();
let mut last_statistics_update = Instant::now();
let mut iter_counter = 0usize;
loop {
if let Ok((sender_index, request, src)) = request_receiver.recv_timeout(timeout) {
let response = match request {
ConnectedRequest::Announce(request) => handle_announce_request(
&config,
&mut small_rng,
&mut torrents,
request,
src,
peer_valid_until,
),
ConnectedRequest::Scrape(request) => {
ConnectedResponse::Scrape(handle_scrape_request(&mut torrents, src, request))
}
};
// Route the response back to the socket worker it came from.
response_sender.try_send_to(sender_index, response, src);
}
// Housekeeping is amortized: only checked every 128 iterations.
if iter_counter % 128 == 0 {
peer_valid_until = ValidUntil::new(config.cleaning.max_peer_age);
let now = Instant::now();
if now > last_cleaning + cleaning_interval {
torrents.clean(&config, &state.access_list);
// Peer counts are only refreshed as part of a cleaning pass.
if !statistics_update_interval.is_zero() {
let peers_ipv4 = torrents.ipv4.values().map(|t| t.peers.len()).sum();
let peers_ipv6 = torrents.ipv6.values().map(|t| t.peers.len()).sum();
state.statistics.peers_ipv4[worker_index.0]
.store(peers_ipv4, Ordering::Release);
state.statistics.peers_ipv6[worker_index.0]
.store(peers_ipv6, Ordering::Release);
}
last_cleaning = now;
}
if !statistics_update_interval.is_zero()
&& now > last_statistics_update + statistics_update_interval
{
state.statistics.torrents_ipv4[worker_index.0]
.store(torrents.ipv4.len(), Ordering::Release);
state.statistics.torrents_ipv6[worker_index.0]
.store(torrents.ipv6.len(), Ordering::Release);
last_statistics_update = now;
}
}
iter_counter = iter_counter.wrapping_add(1);
}
}
/// Handle an announce by dispatching to the torrent map matching the
/// peer's address family, then convert the typed response into the shared
/// `ConnectedResponse` enum.
pub fn handle_announce_request(
    config: &Config,
    rng: &mut SmallRng,
    torrents: &mut TorrentMaps,
    request: AnnounceRequest,
    src: SocketAddr,
    peer_valid_until: ValidUntil,
) -> ConnectedResponse {
    match src.ip() {
        IpAddr::V4(ip) => {
            let response = handle_announce_request_inner(
                config,
                rng,
                &mut torrents.ipv4,
                request,
                ip,
                peer_valid_until,
            );

            response.into()
        }
        IpAddr::V6(ip) => {
            let response = handle_announce_request_inner(
                config,
                rng,
                &mut torrents.ipv6,
                request,
                ip,
                peer_valid_until,
            );

            response.into()
        }
    }
}
/// Handle an announce against one address-family map: upsert or remove the
/// peer, keep the seeder/leecher counters consistent, and pick up to the
/// allowed number of other peers for the response.
fn handle_announce_request_inner<I: Ip>(
config: &Config,
rng: &mut SmallRng,
torrents: &mut TorrentMap<I>,
request: AnnounceRequest,
peer_ip: I,
peer_valid_until: ValidUntil,
) -> ProtocolAnnounceResponse<I> {
let peer_status = PeerStatus::from_event_and_bytes_left(request.event, request.bytes_left);
let peer = Peer {
ip_address: peer_ip,
port: request.port,
status: peer_status,
valid_until: peer_valid_until,
};
let torrent_data = torrents.entry(request.info_hash).or_default();
// Insert (or remove, on "stopped") the announcing peer. `insert` and
// `remove` both return any previously stored peer for this peer id, so
// counters are first incremented for the new status ...
let opt_removed_peer = match peer_status {
PeerStatus::Leeching => {
torrent_data.num_leechers += 1;
torrent_data.peers.insert(request.peer_id, peer)
}
PeerStatus::Seeding => {
torrent_data.num_seeders += 1;
torrent_data.peers.insert(request.peer_id, peer)
}
PeerStatus::Stopped => torrent_data.peers.remove(&request.peer_id),
};
// ... and then decremented for the replaced/removed peer's old status,
// which keeps counts correct on re-announces and status changes.
match opt_removed_peer.map(|peer| peer.status) {
Some(PeerStatus::Leeching) => {
torrent_data.num_leechers -= 1;
}
Some(PeerStatus::Seeding) => {
torrent_data.num_seeders -= 1;
}
_ => {}
}
let max_num_peers_to_take = calc_max_num_peers_to_take(config, request.peers_wanted.0);
// Random peer selection that excludes the announcing peer itself.
let response_peers = extract_response_peers(
rng,
&torrent_data.peers,
max_num_peers_to_take,
request.peer_id,
ProtocolResponsePeer::from_peer,
);
ProtocolAnnounceResponse {
transaction_id: request.transaction_id,
announce_interval: AnnounceInterval(config.protocol.peer_announce_interval),
leechers: NumberOfPeers(torrent_data.num_leechers as i32),
seeders: NumberOfPeers(torrent_data.num_seeders as i32),
peers: response_peers,
}
}
/// Number of peers to return for an announce: the client's requested count
/// clamped to the configured maximum. A non-positive request means "as
/// many as allowed".
#[inline]
fn calc_max_num_peers_to_take(config: &Config, peers_wanted: i32) -> usize {
    let limit = config.protocol.max_response_peers as usize;

    if peers_wanted <= 0 {
        limit
    } else {
        limit.min(peers_wanted as usize)
    }
}
pub fn handle_scrape_request(
torrents: &mut TorrentMaps,
src: SocketAddr,
request: PendingScrapeRequest,
) -> PendingScrapeResponse {
const EMPTY_STATS: TorrentScrapeStatistics = create_torrent_scrape_statistics(0, 0);
let mut torrent_stats: BTreeMap<usize, TorrentScrapeStatistics> = BTreeMap::new();
if src.ip().is_ipv4() {
torrent_stats.extend(request.info_hashes.into_iter().map(|(i, info_hash)| {
let s = if let Some(torrent_data) = torrents.ipv4.get(&info_hash) {
create_torrent_scrape_statistics(
torrent_data.num_seeders as i32,
torrent_data.num_leechers as i32,
)
} else {
EMPTY_STATS
};
(i, s)
}));
} else {
torrent_stats.extend(request.info_hashes.into_iter().map(|(i, info_hash)| {
let s = if let Some(torrent_data) = torrents.ipv6.get(&info_hash) {
create_torrent_scrape_statistics(
torrent_data.num_seeders as i32,
torrent_data.num_leechers as i32,
)
} else {
EMPTY_STATS
};
(i, s)
}));
}
PendingScrapeResponse {
transaction_id: request.transaction_id,
torrent_stats,
}
}
/// Build a `TorrentScrapeStatistics` from raw counts. `const` so it can
/// initialize the zeroed `EMPTY_STATS` constant used for unknown torrents.
#[inline(always)]
const fn create_torrent_scrape_statistics(seeders: i32, leechers: i32) -> TorrentScrapeStatistics {
TorrentScrapeStatistics {
seeders: NumberOfPeers(seeders),
completed: NumberOfDownloads(0), // No implementation planned
leechers: NumberOfPeers(leechers),
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::net::Ipv4Addr;
use quickcheck::{quickcheck, TestResult};
use rand::thread_rng;
use super::*;
/// Deterministic peer id whose first four bytes encode `i`.
fn gen_peer_id(i: u32) -> PeerId {
let mut peer_id = PeerId([0; 20]);
peer_id.0[0..4].copy_from_slice(&i.to_ne_bytes());
peer_id
}
/// Deterministic IPv4 peer whose address encodes `i`; the address is
/// used below as a uniqueness witness.
fn gen_peer(i: u32) -> Peer<Ipv4Addr> {
Peer {
ip_address: Ipv4Addr::from(i.to_be_bytes()),
port: Port(1),
status: PeerStatus::Leeching,
valid_until: ValidUntil::new(0),
}
}
/// Property test for `extract_response_peers`: for any map size and
/// requested count, the selection respects the requested limit, returns
/// (almost) everyone when more peers are requested than exist, contains
/// no duplicates, and never includes the requesting peer itself.
#[test]
fn test_extract_response_peers() {
fn prop(data: (u16, u16)) -> TestResult {
let gen_num_peers = data.0 as u32;
let req_num_peers = data.1 as usize;
let mut peer_map: PeerMap<Ipv4Addr> = Default::default();
let mut opt_sender_key = None;
let mut opt_sender_peer = None;
for i in 0..gen_num_peers {
let key = gen_peer_id(i);
let peer = gen_peer((i << 16) + i);
// Peer 0 plays the role of the announcing (sender) peer.
if i == 0 {
opt_sender_key = Some(key);
opt_sender_peer = Some(ProtocolResponsePeer::from_peer(&peer));
}
peer_map.insert(key, peer);
}
let mut rng = thread_rng();
let peers = extract_response_peers(
&mut rng,
&peer_map,
req_num_peers,
opt_sender_key.unwrap_or_else(|| gen_peer_id(1)),
ProtocolResponsePeer::from_peer,
);
// Check that number of returned peers is correct
let mut success = peers.len() <= req_num_peers;
if req_num_peers >= gen_num_peers as usize {
// Allow exactly one fewer: the sender may have been excluded.
success &= peers.len() == gen_num_peers as usize
|| peers.len() + 1 == gen_num_peers as usize;
}
// Check that returned peers are unique (no overlap) and that sender
// isn't returned
let mut ip_addresses = HashSet::with_capacity(peers.len());
for peer in peers {
if peer == opt_sender_peer.clone().unwrap()
|| ip_addresses.contains(&peer.ip_address)
{
success = false;
break;
}
ip_addresses.insert(peer.ip_address);
}
TestResult::from_bool(success)
}
quickcheck(prop as fn((u16, u16)) -> TestResult);
}
}

View file

@ -1,22 +1,163 @@
use cfg_if::cfg_if;
pub mod common; pub mod common;
pub mod config; pub mod config;
#[cfg(all(feature = "with-glommio", target_os = "linux"))] pub mod handlers;
pub mod glommio; pub mod network;
#[cfg(feature = "with-mio")] pub mod tasks;
pub mod mio;
use config::Config; use config::Config;
use std::collections::BTreeMap;
use std::sync::{atomic::AtomicUsize, Arc};
use std::thread::Builder;
use std::time::Duration;
use anyhow::Context;
#[cfg(feature = "cpu-pinning")]
use aquatic_common::cpu_pinning::{pin_current_if_configured_to, WorkerIndex};
use aquatic_common::privileges::drop_privileges_after_socket_binding;
use crossbeam_channel::unbounded;
use aquatic_common::access_list::update_access_list;
use signal_hook::consts::SIGUSR1;
use signal_hook::iterator::Signals;
use common::{ConnectedRequestSender, ConnectedResponseSender, SocketWorkerIndex, State};
use crate::common::RequestWorkerIndex;
pub const APP_NAME: &str = "aquatic_udp: UDP BitTorrent tracker"; pub const APP_NAME: &str = "aquatic_udp: UDP BitTorrent tracker";
pub fn run(config: Config) -> ::anyhow::Result<()> { pub fn run(config: Config) -> ::anyhow::Result<()> {
cfg_if! { let state = State::new(config.request_workers);
if #[cfg(all(feature = "with-glommio", target_os = "linux"))] {
glommio::run(config) update_access_list(&config.access_list, &state.access_list)?;
} else {
mio::run(config) let mut signals = Signals::new(::std::iter::once(SIGUSR1))?;
let num_bound_sockets = Arc::new(AtomicUsize::new(0));
let mut request_senders = Vec::new();
let mut request_receivers = BTreeMap::new();
let mut response_senders = Vec::new();
let mut response_receivers = BTreeMap::new();
for i in 0..config.request_workers {
let (request_sender, request_receiver) = unbounded();
request_senders.push(request_sender);
request_receivers.insert(i, request_receiver);
}
for i in 0..config.socket_workers {
let (response_sender, response_receiver) = unbounded();
response_senders.push(response_sender);
response_receivers.insert(i, response_receiver);
}
for i in 0..config.request_workers {
let config = config.clone();
let state = state.clone();
let request_receiver = request_receivers.remove(&i).unwrap().clone();
let response_sender = ConnectedResponseSender::new(response_senders.clone());
Builder::new()
.name(format!("request-{:02}", i + 1))
.spawn(move || {
#[cfg(feature = "cpu-pinning")]
pin_current_if_configured_to(
&config.cpu_pinning,
config.socket_workers,
WorkerIndex::RequestWorker(i),
);
handlers::run_request_worker(
config,
state,
request_receiver,
response_sender,
RequestWorkerIndex(i),
)
})
.with_context(|| "spawn request worker")?;
}
for i in 0..config.socket_workers {
let state = state.clone();
let config = config.clone();
let request_sender =
ConnectedRequestSender::new(SocketWorkerIndex(i), request_senders.clone());
let response_receiver = response_receivers.remove(&i).unwrap();
let num_bound_sockets = num_bound_sockets.clone();
Builder::new()
.name(format!("socket-{:02}", i + 1))
.spawn(move || {
#[cfg(feature = "cpu-pinning")]
pin_current_if_configured_to(
&config.cpu_pinning,
config.socket_workers,
WorkerIndex::SocketWorker(i),
);
network::run_socket_worker(
state,
config,
i,
request_sender,
response_receiver,
num_bound_sockets,
);
})
.with_context(|| "spawn socket worker")?;
}
if config.statistics.interval != 0 {
let state = state.clone();
let config = config.clone();
Builder::new()
.name("statistics-collector".to_string())
.spawn(move || {
#[cfg(feature = "cpu-pinning")]
pin_current_if_configured_to(
&config.cpu_pinning,
config.socket_workers,
WorkerIndex::Other,
);
loop {
::std::thread::sleep(Duration::from_secs(config.statistics.interval));
tasks::gather_and_print_statistics(&state, &config);
}
})
.with_context(|| "spawn statistics worker")?;
}
drop_privileges_after_socket_binding(
&config.privileges,
num_bound_sockets,
config.socket_workers,
)
.unwrap();
#[cfg(feature = "cpu-pinning")]
pin_current_if_configured_to(
&config.cpu_pinning,
config.socket_workers,
WorkerIndex::Other,
);
for signal in &mut signals {
match signal {
SIGUSR1 => {
let _ = update_access_list(&config.access_list, &state.access_list);
}
_ => unreachable!(),
} }
} }
Ok(())
} }

View file

@ -1,30 +0,0 @@
use aquatic_common::access_list::AccessListArcSwap;
use parking_lot::Mutex;
use std::sync::{atomic::AtomicUsize, Arc};
use crate::common::*;
/// Counters updated by the workers and read by the statistics task.
#[derive(Default)]
pub struct Statistics {
pub requests_received: AtomicUsize,
pub responses_sent: AtomicUsize,
pub bytes_received: AtomicUsize,
pub bytes_sent: AtomicUsize,
}
/// State shared between all workers of the mio implementation. Cloning is
/// cheap: every field is behind an `Arc`, and `torrents` is additionally
/// guarded by a mutex since all request workers share one torrent map.
#[derive(Clone)]
pub struct State {
pub access_list: Arc<AccessListArcSwap>,
pub torrents: Arc<Mutex<TorrentMaps>>,
pub statistics: Arc<Statistics>,
}
impl Default for State {
    /// Fresh shared state: empty access list, empty torrent maps and
    /// zeroed statistics counters.
    fn default() -> Self {
        Self {
            access_list: Default::default(),
            torrents: Arc::new(Mutex::new(TorrentMaps::default())),
            statistics: Default::default(),
        }
    }
}

View file

@ -1,98 +0,0 @@
use std::net::SocketAddr;
use std::time::Duration;
use aquatic_common::ValidUntil;
use crossbeam_channel::{Receiver, Sender};
use rand::{rngs::SmallRng, SeedableRng};
use aquatic_udp_protocol::*;
use crate::common::handlers::*;
use crate::config::Config;
use crate::mio::common::*;
/// Mio-implementation request worker: batch requests from the shared
/// channel, handle them under the global `TorrentMaps` mutex, then send
/// the responses back. Batching bounds how often the mutex is taken.
pub fn run_request_worker(
state: State,
config: Config,
request_receiver: Receiver<(ConnectedRequest, SocketAddr)>,
response_sender: Sender<(ConnectedResponse, SocketAddr)>,
) {
let mut announce_requests: Vec<(AnnounceRequest, SocketAddr)> = Vec::new();
let mut scrape_requests: Vec<(ScrapeRequest, SocketAddr)> = Vec::new();
let mut responses: Vec<(ConnectedResponse, SocketAddr)> = Vec::new();
let mut small_rng = SmallRng::from_entropy();
let timeout = Duration::from_micros(config.handlers.channel_recv_timeout_microseconds);
loop {
// Set when the batch is cut short because the mutex happened to be
// free; reused below to avoid locking twice.
let mut opt_torrents = None;
// Collect requests from channel, divide them by type
//
// Collect a maximum number of request. Stop collecting before that
// number is reached if having waited for too long for a request, but
// only if TorrentMaps mutex isn't locked.
for i in 0..config.handlers.max_requests_per_iter {
let (request, src): (ConnectedRequest, SocketAddr) = if i == 0 {
// First request: block indefinitely so an idle worker
// doesn't spin.
match request_receiver.recv() {
Ok(r) => r,
Err(_) => break, // Really shouldn't happen
}
} else {
match request_receiver.recv_timeout(timeout) {
Ok(r) => r,
Err(_) => {
if let Some(guard) = state.torrents.try_lock() {
opt_torrents = Some(guard);
break;
} else {
// Mutex busy: keep collecting instead of
// waiting idle behind another worker.
continue;
}
}
}
};
match request {
ConnectedRequest::Announce(request) => announce_requests.push((request, src)),
ConnectedRequest::Scrape { request, .. } => scrape_requests.push((request, src)),
}
}
// Generate responses for announce and scrape requests, then drop MutexGuard.
{
let mut torrents = opt_torrents.unwrap_or_else(|| state.torrents.lock());
let peer_valid_until = ValidUntil::new(config.cleaning.max_peer_age);
responses.extend(announce_requests.drain(..).map(|(request, src)| {
let response = handle_announce_request(
&config,
&mut small_rng,
&mut torrents,
request,
src,
peer_valid_until,
);
(ConnectedResponse::Announce(response), src)
}));
responses.extend(scrape_requests.drain(..).map(|(request, src)| {
let response = ConnectedResponse::Scrape {
response: handle_scrape_request(&mut torrents, src, request),
original_indices: Vec::new(),
};
(response, src)
}));
}
// Responses are sent only after the mutex guard is released.
for r in responses.drain(..) {
if let Err(err) = response_sender.send(r) {
::log::error!("error sending response to channel: {}", err);
}
}
}
}

View file

@ -1,157 +0,0 @@
use std::sync::{atomic::AtomicUsize, Arc};
use std::thread::Builder;
use std::time::Duration;
use anyhow::Context;
#[cfg(feature = "cpu-pinning")]
use aquatic_common::cpu_pinning::{pin_current_if_configured_to, WorkerIndex};
use aquatic_common::privileges::drop_privileges_after_socket_binding;
use crossbeam_channel::unbounded;
use aquatic_common::access_list::update_access_list;
use signal_hook::consts::SIGUSR1;
use signal_hook::iterator::Signals;
use crate::config::Config;
pub mod common;
pub mod handlers;
pub mod network;
pub mod tasks;
use common::State;
/// Mio-implementation entry point: initialize shared state, move worker
/// startup to a background thread, and keep the main thread handling
/// SIGUSR1 (access-list reload).
pub fn run(config: Config) -> ::anyhow::Result<()> {
let state = State::default();
// Load the access list once before any worker can observe it.
update_access_list(&config.access_list, &state.access_list)?;
// Register the handler before spawning workers so no signal is missed.
let mut signals = Signals::new(::std::iter::once(SIGUSR1))?;
{
let config = config.clone();
let state = state.clone();
::std::thread::spawn(move || run_inner(config, state));
}
#[cfg(feature = "cpu-pinning")]
pin_current_if_configured_to(
&config.cpu_pinning,
config.socket_workers,
WorkerIndex::Other,
);
// Blocks until a registered signal arrives; loops forever in practice.
for signal in &mut signals {
match signal {
SIGUSR1 => {
// Best-effort reload; on failure the old list stays active.
let _ = update_access_list(&config.access_list, &state.access_list);
}
_ => unreachable!(),
}
}
Ok(())
}
/// Spawn the mio worker threads (request workers, socket workers and an
/// optional statistics thread), drop privileges once all sockets are
/// bound, then run the torrent-cleaning loop on the current thread.
/// Never returns.
pub fn run_inner(config: Config, state: State) -> ::anyhow::Result<()> {
// Incremented by each socket worker after binding; used to decide when
// privileges can be dropped.
let num_bound_sockets = Arc::new(AtomicUsize::new(0));
// Single shared MPMC channel in each direction: all workers compete on
// the same queues (contrast with the per-worker sharding that replaced
// this design).
let (request_sender, request_receiver) = unbounded();
let (response_sender, response_receiver) = unbounded();
for i in 0..config.request_workers {
let state = state.clone();
let config = config.clone();
let request_receiver = request_receiver.clone();
let response_sender = response_sender.clone();
Builder::new()
.name(format!("request-{:02}", i + 1))
.spawn(move || {
#[cfg(feature = "cpu-pinning")]
pin_current_if_configured_to(
&config.cpu_pinning,
config.socket_workers,
WorkerIndex::RequestWorker(i),
);
handlers::run_request_worker(state, config, request_receiver, response_sender)
})
.with_context(|| "spawn request worker")?;
}
for i in 0..config.socket_workers {
let state = state.clone();
let config = config.clone();
let request_sender = request_sender.clone();
let response_receiver = response_receiver.clone();
let num_bound_sockets = num_bound_sockets.clone();
Builder::new()
.name(format!("socket-{:02}", i + 1))
.spawn(move || {
#[cfg(feature = "cpu-pinning")]
pin_current_if_configured_to(
&config.cpu_pinning,
config.socket_workers,
WorkerIndex::SocketWorker(i),
);
network::run_socket_worker(
state,
config,
i,
request_sender,
response_receiver,
num_bound_sockets,
)
})
.with_context(|| "spawn socket worker")?;
}
// interval == 0 disables statistics printing entirely.
if config.statistics.interval != 0 {
let state = state.clone();
let config = config.clone();
Builder::new()
.name("statistics-collector".to_string())
.spawn(move || {
#[cfg(feature = "cpu-pinning")]
pin_current_if_configured_to(
&config.cpu_pinning,
config.socket_workers,
WorkerIndex::Other,
);
loop {
::std::thread::sleep(Duration::from_secs(config.statistics.interval));
tasks::gather_and_print_statistics(&state, &config);
}
})
.with_context(|| "spawn statistics worker")?;
}
// Waits until all sockets are bound before dropping privileges.
drop_privileges_after_socket_binding(
&config.privileges,
num_bound_sockets,
config.socket_workers,
)
.unwrap();
#[cfg(feature = "cpu-pinning")]
pin_current_if_configured_to(
&config.cpu_pinning,
config.socket_workers,
WorkerIndex::Other,
);
// The current thread becomes the cleaning loop for the shared maps.
loop {
::std::thread::sleep(Duration::from_secs(
config.cleaning.torrent_cleaning_interval,
));
state.torrents.lock().clean(&config, &state.access_list);
}
}

View file

@ -1,329 +0,0 @@
use std::io::{Cursor, ErrorKind};
use std::net::{IpAddr, SocketAddr};
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use std::time::{Duration, Instant};
use std::vec::Drain;
use aquatic_common::access_list::create_access_list_cache;
use crossbeam_channel::{Receiver, Sender};
use mio::net::UdpSocket;
use mio::{Events, Interest, Poll, Token};
use rand::prelude::{Rng, SeedableRng, StdRng};
use socket2::{Domain, Protocol, Socket, Type};
use aquatic_udp_protocol::{IpVersion, Request, Response};
use crate::common::handlers::*;
use crate::common::network::ConnectionMap;
use crate::common::*;
use crate::config::Config;
use super::common::*;
/// Mio socket worker: poll one UDP socket for readability, parse and
/// forward incoming requests, and flush pending responses each iteration.
/// `token_num` identifies this worker's socket registration in the poll.
pub fn run_socket_worker(
state: State,
config: Config,
token_num: usize,
request_sender: Sender<(ConnectedRequest, SocketAddr)>,
response_receiver: Receiver<(ConnectedResponse, SocketAddr)>,
num_bound_sockets: Arc<AtomicUsize>,
) {
let mut rng = StdRng::from_entropy();
let mut buffer = [0u8; MAX_PACKET_SIZE];
let mut socket = UdpSocket::from_std(create_socket(&config));
let mut poll = Poll::new().expect("create poll");
let interests = Interest::READABLE;
poll.registry()
.register(&mut socket, Token(token_num), interests)
.unwrap();
// Signal the privilege-dropping logic that this socket is bound.
num_bound_sockets.fetch_add(1, Ordering::SeqCst);
let mut events = Events::with_capacity(config.network.poll_event_capacity);
let mut connections = ConnectionMap::default();
// Responses generated on this worker (connect replies, errors) that
// bypass the request workers.
let mut local_responses: Vec<(Response, SocketAddr)> = Vec::new();
// Poll timeout so responses are flushed even when no requests arrive.
let timeout = Duration::from_millis(50);
let cleaning_duration = Duration::from_secs(config.cleaning.connection_cleaning_interval);
let mut iter_counter = 0usize;
let mut last_cleaning = Instant::now();
loop {
poll.poll(&mut events, Some(timeout))
.expect("failed polling");
for event in events.iter() {
let token = event.token();
if (token.0 == token_num) & event.is_readable() {
read_requests(
&config,
&state,
&mut connections,
&mut rng,
&mut socket,
&mut buffer,
&request_sender,
&mut local_responses,
);
}
}
send_responses(
&state,
&config,
&mut socket,
&mut buffer,
&response_receiver,
local_responses.drain(..),
);
// Connection cleaning is amortized: checked every 32 iterations.
if iter_counter % 32 == 0 {
let now = Instant::now();
if now > last_cleaning + cleaning_duration {
connections.clean();
last_cleaning = now;
}
}
iter_counter = iter_counter.wrapping_add(1);
}
}
/// Create a non-blocking UDP socket bound to the configured address.
///
/// SO_REUSEPORT is enabled so that multiple socket workers can bind to the
/// same address/port. Panics on socket creation or bind failure; failure to
/// set the receive buffer size is only logged.
fn create_socket(config: &Config) -> ::std::net::UdpSocket {
    let domain = if config.network.address.is_ipv4() {
        Domain::IPV4
    } else {
        Domain::IPV6
    };

    let socket = Socket::new(domain, Type::DGRAM, Some(Protocol::UDP)).expect("create socket");

    socket.set_reuse_port(true).expect("socket: set reuse port");
    socket
        .set_nonblocking(true)
        .expect("socket: set nonblocking");
    socket
        .bind(&config.network.address.into())
        .unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", config.network.address, err));

    let requested_recv_buffer_size = config.network.socket_recv_buffer_size;

    // Zero means "keep the OS default".
    if requested_recv_buffer_size != 0 {
        if let Err(err) = socket.set_recv_buffer_size(requested_recv_buffer_size) {
            ::log::error!(
                "socket: failed setting recv buffer to {}: {:?}",
                requested_recv_buffer_size,
                err
            );
        }
    }

    socket.into()
}
/// Drain the socket of incoming packets, parsing each as a tracker request.
///
/// Connect requests are answered immediately by pushing onto
/// `local_responses`; validated announce/scrape requests are forwarded to
/// request workers via `request_sender`. Reads until `recv_from` would block.
/// Updates request/byte counters in `state.statistics` if statistics are
/// enabled.
#[inline]
fn read_requests(
    config: &Config,
    state: &State,
    connections: &mut ConnectionMap,
    rng: &mut StdRng,
    socket: &mut UdpSocket,
    buffer: &mut [u8],
    request_sender: &Sender<(ConnectedRequest, SocketAddr)>,
    local_responses: &mut Vec<(Response, SocketAddr)>,
) {
    let mut requests_received: usize = 0;
    let mut bytes_received: usize = 0;

    // Computed once per call, so all connections registered in this batch
    // share the same expiry.
    let valid_until = ValidUntil::new(config.cleaning.max_connection_age);
    let access_list_mode = config.access_list.mode;
    let mut access_list_cache = create_access_list_cache(&state.access_list);

    loop {
        match socket.recv_from(&mut buffer[..]) {
            Ok((amt, src)) => {
                let request =
                    Request::from_bytes(&buffer[..amt], config.protocol.max_scrape_torrents);

                bytes_received += amt;

                // Only successfully parsed packets count as requests.
                if request.is_ok() {
                    requests_received += 1;
                }

                match request {
                    Ok(Request::Connect(request)) => {
                        // Hand out a fresh random connection id and remember
                        // it for this peer address.
                        let connection_id = ConnectionId(rng.gen());

                        connections.insert(connection_id, src, valid_until);

                        let response = Response::Connect(ConnectResponse {
                            connection_id,
                            transaction_id: request.transaction_id,
                        });

                        local_responses.push((response, src))
                    }
                    Ok(Request::Announce(request)) => {
                        // Silently ignored unless the connection id was
                        // previously issued to this exact peer address.
                        if connections.contains(request.connection_id, src) {
                            if access_list_cache
                                .load()
                                .allows(access_list_mode, &request.info_hash.0)
                            {
                                if let Err(err) = request_sender
                                    .try_send((ConnectedRequest::Announce(request), src))
                                {
                                    // Channel full/disconnected: request is dropped.
                                    ::log::warn!("request_sender.try_send failed: {:?}", err)
                                }
                            } else {
                                let response = Response::Error(ErrorResponse {
                                    transaction_id: request.transaction_id,
                                    message: "Info hash not allowed".into(),
                                });

                                local_responses.push((response, src))
                            }
                        }
                    }
                    Ok(Request::Scrape(request)) => {
                        if connections.contains(request.connection_id, src) {
                            let request = ConnectedRequest::Scrape {
                                request,
                                original_indices: Vec::new(),
                            };

                            if let Err(err) = request_sender.try_send((request, src)) {
                                ::log::warn!("request_sender.try_send failed: {:?}", err)
                            }
                        }
                    }
                    Err(err) => {
                        ::log::debug!("Request::from_bytes error: {:?}", err);

                        // Parse errors that carry connection/transaction ids
                        // can be reported back to the peer, but only for
                        // known connections (avoids reflection to spoofed
                        // addresses).
                        if let RequestParseError::Sendable {
                            connection_id,
                            transaction_id,
                            err,
                        } = err
                        {
                            if connections.contains(connection_id, src) {
                                let response = ErrorResponse {
                                    transaction_id,
                                    message: err.right_or("Parse error").into(),
                                };

                                local_responses.push((response.into(), src));
                            }
                        }
                    }
                }
            }
            Err(err) => {
                // WouldBlock means the socket is drained; anything else is
                // logged and the loop continues.
                if err.kind() == ErrorKind::WouldBlock {
                    break;
                }

                ::log::info!("recv_from error: {}", err);
            }
        }
    }

    if config.statistics.interval != 0 {
        state
            .statistics
            .requests_received
            .fetch_add(requests_received, Ordering::SeqCst);
        state
            .statistics
            .bytes_received
            .fetch_add(bytes_received, Ordering::SeqCst);
    }
}
/// Serialize and send queued responses: first the locally produced ones,
/// then whatever the request workers have put on `response_receiver`.
///
/// Stops early if the socket reports WouldBlock, in which case the remaining
/// responses in the iterator are dropped. Updates response/byte counters in
/// `state.statistics` if statistics are enabled.
#[inline]
fn send_responses(
    state: &State,
    config: &Config,
    socket: &mut UdpSocket,
    buffer: &mut [u8],
    response_receiver: &Receiver<(ConnectedResponse, SocketAddr)>,
    local_responses: Drain<(Response, SocketAddr)>,
) {
    let mut responses_sent: usize = 0;
    let mut bytes_sent: usize = 0;

    // One cursor over the shared buffer, rewound for each response.
    let mut cursor = Cursor::new(buffer);

    let response_iterator = local_responses.into_iter().chain(
        response_receiver
            .try_iter()
            .map(|(response, addr)| (response.into(), addr)),
    );

    for (response, src) in response_iterator {
        cursor.set_position(0);

        // Peer lists are serialized differently for IPv4 and IPv6 peers.
        let ip_version = ip_version_from_ip(src.ip());

        match response.write(&mut cursor, ip_version) {
            Ok(()) => {
                let amt = cursor.position() as usize;

                match socket.send_to(&cursor.get_ref()[..amt], src) {
                    Ok(amt) => {
                        responses_sent += 1;
                        // Count bytes actually reported sent by the OS.
                        bytes_sent += amt;
                    }
                    Err(err) => {
                        if err.kind() == ErrorKind::WouldBlock {
                            break;
                        }

                        ::log::info!("send_to error: {}", err);
                    }
                }
            }
            Err(err) => {
                ::log::error!("Response::write error: {:?}", err);
            }
        }
    }

    if config.statistics.interval != 0 {
        state
            .statistics
            .responses_sent
            .fetch_add(responses_sent, Ordering::SeqCst);
        state
            .statistics
            .bytes_sent
            .fetch_add(bytes_sent, Ordering::SeqCst);
    }
}
/// Determine which IP version a response should be serialized for.
///
/// IPv4-mapped IPv6 addresses (`::ffff:a.b.c.d`) are treated as IPv4, since
/// such peers are IPv4 peers reaching a dual-stack socket.
fn ip_version_from_ip(ip: IpAddr) -> IpVersion {
    match ip {
        IpAddr::V4(_) => IpVersion::IPv4,
        IpAddr::V6(ip) => match ip.segments() {
            // IPv4-mapped IPv6 address
            [0, 0, 0, 0, 0, 0xffff, ..] => IpVersion::IPv4,
            _ => IpVersion::IPv6,
        },
    }
}

View file

@ -1,96 +0,0 @@
use std::sync::atomic::Ordering;
use histogram::Histogram;
use super::common::*;
use crate::config::Config;
/// Gather and print throughput and torrent/peer statistics to stdout.
///
/// The global counters are read-and-reset atomically via `fetch_and(0, ..)`,
/// so each call reports activity since the previous call (assumed to be one
/// `config.statistics.interval` ago). Also walks the shared torrent maps
/// under their mutex to count torrents/peers and build a peers-per-torrent
/// histogram.
pub fn gather_and_print_statistics(state: &State, config: &Config) {
    let interval = config.statistics.interval;

    // fetch_and with 0 returns the previous value and zeroes the counter.
    let requests_received: f64 = state
        .statistics
        .requests_received
        .fetch_and(0, Ordering::SeqCst) as f64;
    let responses_sent: f64 = state
        .statistics
        .responses_sent
        .fetch_and(0, Ordering::SeqCst) as f64;
    let bytes_received: f64 = state
        .statistics
        .bytes_received
        .fetch_and(0, Ordering::SeqCst) as f64;
    let bytes_sent: f64 = state.statistics.bytes_sent.fetch_and(0, Ordering::SeqCst) as f64;

    let requests_per_second = requests_received / interval as f64;
    let responses_per_second: f64 = responses_sent / interval as f64;
    let bytes_received_per_second: f64 = bytes_received / interval as f64;
    let bytes_sent_per_second: f64 = bytes_sent / interval as f64;

    println!(
        "stats: {:.2} requests/second, {:.2} responses/second",
        requests_per_second, responses_per_second
    );

    println!(
        "bandwidth: {:7.2} Mbit/s in, {:7.2} Mbit/s out",
        bytes_received_per_second * 8.0 / 1_000_000.0,
        bytes_sent_per_second * 8.0 / 1_000_000.0,
    );

    let mut total_num_torrents_ipv4 = 0usize;
    let mut total_num_torrents_ipv6 = 0usize;
    let mut total_num_peers_ipv4 = 0usize;
    let mut total_num_peers_ipv6 = 0usize;

    let mut peers_per_torrent = Histogram::new();

    // Scope limits how long the torrent mutex is held.
    {
        let torrents = &mut state.torrents.lock();

        for torrent in torrents.ipv4.values() {
            let num_peers = torrent.num_seeders + torrent.num_leechers;

            if let Err(err) = peers_per_torrent.increment(num_peers as u64) {
                ::log::error!("error incrementing peers_per_torrent histogram: {}", err)
            }

            total_num_peers_ipv4 += num_peers;
        }
        for torrent in torrents.ipv6.values() {
            let num_peers = torrent.num_seeders + torrent.num_leechers;

            if let Err(err) = peers_per_torrent.increment(num_peers as u64) {
                ::log::error!("error incrementing peers_per_torrent histogram: {}", err)
            }

            total_num_peers_ipv6 += num_peers;
        }

        total_num_torrents_ipv4 += torrents.ipv4.len();
        total_num_torrents_ipv6 += torrents.ipv6.len();
    }

    println!(
        "ipv4 torrents: {}, peers: {}; ipv6 torrents: {}, peers: {}",
        total_num_torrents_ipv4,
        total_num_peers_ipv4,
        total_num_torrents_ipv6,
        total_num_peers_ipv6,
    );

    // Percentiles are only meaningful if at least one value was recorded.
    if peers_per_torrent.entries() != 0 {
        println!(
            "peers per torrent: min: {}, p50: {}, p75: {}, p90: {}, p99: {}, p999: {}, max: {}",
            peers_per_torrent.minimum().unwrap(),
            peers_per_torrent.percentile(50.0).unwrap(),
            peers_per_torrent.percentile(75.0).unwrap(),
            peers_per_torrent.percentile(90.0).unwrap(),
            peers_per_torrent.percentile(99.0).unwrap(),
            peers_per_torrent.percentile(99.9).unwrap(),
            peers_per_torrent.maximum().unwrap(),
        );
    }

    println!();
}

View file

@ -0,0 +1,545 @@
use std::collections::BTreeMap;
use std::io::{Cursor, ErrorKind};
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use std::time::{Duration, Instant};
use std::vec::Drain;
use crossbeam_channel::Receiver;
use mio::net::UdpSocket;
use mio::{Events, Interest, Poll, Token};
use rand::prelude::{Rng, SeedableRng, StdRng};
use aquatic_common::access_list::create_access_list_cache;
use aquatic_common::access_list::AccessListCache;
use aquatic_common::AHashIndexMap;
use aquatic_common::ValidUntil;
use aquatic_udp_protocol::*;
use socket2::{Domain, Protocol, Socket, Type};
use crate::common::*;
use crate::config::Config;
/// Map of (connection id, peer address) pairs to their expiry times.
///
/// Used to verify that announce/scrape requests present a connection id that
/// was previously handed out to the same peer address.
#[derive(Default)]
pub struct ConnectionMap(AHashIndexMap<(ConnectionId, SocketAddr), ValidUntil>);

impl ConnectionMap {
    /// Register a connection id for a peer address, valid until `valid_until`.
    pub fn insert(
        &mut self,
        connection_id: ConnectionId,
        socket_addr: SocketAddr,
        valid_until: ValidUntil,
    ) {
        self.0.insert((connection_id, socket_addr), valid_until);
    }

    /// Check whether this (connection id, peer address) pair is registered.
    /// Expired entries are only removed by `clean`, so an entry past its
    /// expiry may still pass this check until the next cleaning pass.
    pub fn contains(&self, connection_id: ConnectionId, socket_addr: SocketAddr) -> bool {
        self.0.contains_key(&(connection_id, socket_addr))
    }

    /// Drop expired entries and release excess capacity.
    pub fn clean(&mut self) {
        let now = Instant::now();

        self.0.retain(|_, v| v.0 > now);
        self.0.shrink_to_fit();
    }
}
/// Bookkeeping for one in-flight scrape request that was split across
/// multiple request workers.
pub struct PendingScrapeResponseMeta {
    // Number of request workers whose partial response has not arrived yet.
    num_pending: usize,
    // Expiry, after which `clean` drops the entry (e.g. if a worker's
    // partial response never arrives).
    valid_until: ValidUntil,
}

/// Partial scrape responses keyed by transaction id, accumulated until all
/// request workers involved in a scrape have responded.
#[derive(Default)]
pub struct PendingScrapeResponseMap(
    AHashIndexMap<TransactionId, (PendingScrapeResponseMeta, PendingScrapeResponse)>,
);

impl PendingScrapeResponseMap {
    /// Register a scrape that was split into `num_pending` partial requests.
    /// Must be called before the partial responses can be merged back in via
    /// `add_and_get_finished`.
    pub fn prepare(
        &mut self,
        transaction_id: TransactionId,
        num_pending: usize,
        valid_until: ValidUntil,
    ) {
        let meta = PendingScrapeResponseMeta {
            num_pending,
            valid_until,
        };
        let response = PendingScrapeResponse {
            transaction_id,
            torrent_stats: BTreeMap::new(),
        };

        self.0.insert(transaction_id, (meta, response));
    }

    /// Merge one worker's partial response. Returns the complete
    /// `Response::Scrape` once all partial responses have arrived, `None`
    /// otherwise (including when no matching `prepare` entry exists).
    pub fn add_and_get_finished(&mut self, response: PendingScrapeResponse) -> Option<Response> {
        let finished = if let Some(r) = self.0.get_mut(&response.transaction_id) {
            r.0.num_pending -= 1;

            // Accumulated stats are keyed by original info hash index, so
            // ordering is restored regardless of worker arrival order.
            r.1.torrent_stats.extend(response.torrent_stats.into_iter());

            r.0.num_pending == 0
        } else {
            ::log::warn!("PendingScrapeResponses.add didn't find PendingScrapeResponse in map");

            false
        };

        if finished {
            let response = self.0.remove(&response.transaction_id).unwrap().1;

            Some(Response::Scrape(ScrapeResponse {
                transaction_id: response.transaction_id,
                torrent_stats: response.torrent_stats.into_values().collect(),
            }))
        } else {
            None
        }
    }

    /// Drop expired entries and release excess capacity.
    pub fn clean(&mut self) {
        let now = Instant::now();

        self.0.retain(|_, v| v.0.valid_until.0 > now);
        self.0.shrink_to_fit();
    }
}
/// Run a socket worker: bind a UDP socket and service it in a mio poll loop.
///
/// Connect requests are answered locally; announce and scrape requests are
/// routed (by info hash) to the sharded request workers via `request_sender`.
/// Scrapes spanning several workers are reassembled through a
/// `PendingScrapeResponseMap`. This function never returns.
pub fn run_socket_worker(
    state: State,
    config: Config,
    token_num: usize,
    request_sender: ConnectedRequestSender,
    response_receiver: Receiver<(ConnectedResponse, SocketAddr)>,
    num_bound_sockets: Arc<AtomicUsize>,
) {
    let mut rng = StdRng::from_entropy();
    let mut buffer = [0u8; MAX_PACKET_SIZE];

    let mut socket = UdpSocket::from_std(create_socket(&config));
    let mut poll = Poll::new().expect("create poll");

    let interests = Interest::READABLE;

    poll.registry()
        .register(&mut socket, Token(token_num), interests)
        .unwrap();

    // Tell the main thread this worker's socket is bound; privileges are
    // dropped elsewhere once all sockets report in.
    num_bound_sockets.fetch_add(1, Ordering::SeqCst);

    let mut events = Events::with_capacity(config.network.poll_event_capacity);
    let mut connections = ConnectionMap::default();
    let mut pending_scrape_responses = PendingScrapeResponseMap::default();
    let mut local_responses: Vec<(Response, SocketAddr)> = Vec::new();

    let poll_timeout = Duration::from_millis(config.network.poll_timeout_ms);

    let connection_cleaning_duration =
        Duration::from_secs(config.cleaning.connection_cleaning_interval);
    let pending_scrape_cleaning_duration =
        Duration::from_secs(config.cleaning.pending_scrape_cleaning_interval);

    // Precomputed expiries, refreshed periodically below, so the hot path
    // doesn't have to read the clock for every request.
    let mut connection_valid_until = ValidUntil::new(config.cleaning.max_connection_age);
    let mut pending_scrape_valid_until = ValidUntil::new(config.cleaning.max_pending_scrape_age);

    let mut last_connection_cleaning = Instant::now();
    let mut last_pending_scrape_cleaning = Instant::now();

    let mut iter_counter = 0usize;

    loop {
        poll.poll(&mut events, Some(poll_timeout))
            .expect("failed polling");

        for event in events.iter() {
            let token = event.token();

            if (token.0 == token_num) & event.is_readable() {
                read_requests(
                    &config,
                    &state,
                    &mut connections,
                    &mut pending_scrape_responses,
                    &mut rng,
                    &mut socket,
                    &mut buffer,
                    &request_sender,
                    &mut local_responses,
                    connection_valid_until,
                    pending_scrape_valid_until,
                );
            }
        }

        // Runs every iteration (not only on read events): request workers
        // may have produced responses in the meantime.
        send_responses(
            &state,
            &config,
            &mut socket,
            &mut buffer,
            &response_receiver,
            &mut pending_scrape_responses,
            local_responses.drain(..),
        );

        // Run periodic ValidUntil updates and state cleaning
        if iter_counter % 128 == 0 {
            let now = Instant::now();

            connection_valid_until =
                ValidUntil::new_with_now(now, config.cleaning.max_connection_age);
            pending_scrape_valid_until =
                ValidUntil::new_with_now(now, config.cleaning.max_pending_scrape_age);

            if now > last_connection_cleaning + connection_cleaning_duration {
                connections.clean();

                last_connection_cleaning = now;
            }
            if now > last_pending_scrape_cleaning + pending_scrape_cleaning_duration {
                pending_scrape_responses.clean();

                last_pending_scrape_cleaning = now;
            }
        }

        iter_counter = iter_counter.wrapping_add(1);
    }
}
/// Drain the socket of incoming packets and dispatch each parsed request via
/// `handle_request`.
///
/// IPv4-mapped IPv6 source addresses are normalized to plain IPv4 before
/// dispatch. Reads until `recv_from` would block. Updates request/byte
/// counters in `state.statistics` if statistics are enabled.
#[inline]
fn read_requests(
    config: &Config,
    state: &State,
    connections: &mut ConnectionMap,
    pending_scrape_responses: &mut PendingScrapeResponseMap,
    rng: &mut StdRng,
    socket: &mut UdpSocket,
    buffer: &mut [u8],
    request_sender: &ConnectedRequestSender,
    local_responses: &mut Vec<(Response, SocketAddr)>,
    connection_valid_until: ValidUntil,
    pending_scrape_valid_until: ValidUntil,
) {
    let mut requests_received: usize = 0;
    let mut bytes_received: usize = 0;

    let mut access_list_cache = create_access_list_cache(&state.access_list);

    loop {
        match socket.recv_from(&mut buffer[..]) {
            Ok((amt, src)) => {
                let res_request =
                    Request::from_bytes(&buffer[..amt], config.protocol.max_scrape_torrents);

                bytes_received += amt;

                // Only successfully parsed packets count as requests.
                if res_request.is_ok() {
                    requests_received += 1;
                }

                let src = match src {
                    SocketAddr::V6(src) => {
                        match src.ip().octets() {
                            // Convert IPv4-mapped address (available in std but nightly-only)
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => {
                                SocketAddr::V4(SocketAddrV4::new(
                                    Ipv4Addr::new(a, b, c, d),
                                    src.port(),
                                ))
                            }
                            _ => src.into(),
                        }
                    }
                    src => src,
                };

                handle_request(
                    config,
                    connections,
                    pending_scrape_responses,
                    &mut access_list_cache,
                    rng,
                    request_sender,
                    local_responses,
                    connection_valid_until,
                    pending_scrape_valid_until,
                    res_request,
                    src,
                );
            }
            Err(err) => {
                // WouldBlock means the socket is drained; anything else is
                // logged and the loop continues.
                if err.kind() == ErrorKind::WouldBlock {
                    break;
                }

                ::log::info!("recv_from error: {}", err);
            }
        }
    }

    if config.statistics.interval != 0 {
        state
            .statistics
            .requests_received
            .fetch_add(requests_received, Ordering::Release);
        state
            .statistics
            .bytes_received
            .fetch_add(bytes_received, Ordering::Release);
    }
}
/// Handle one parsed (or failed-to-parse) request from peer `src`.
///
/// - Connect: answered locally with a fresh random connection id.
/// - Announce: after connection and access-list checks, routed to the
///   request worker responsible for the info hash.
/// - Scrape: split by info hash into per-worker partial requests; a matching
///   entry is prepared in `pending_scrape_responses` so the partial
///   responses can be reassembled later.
/// - Parse errors carrying ids are reported back to known connections only.
///
/// Requests with an unknown (connection id, peer address) pair are silently
/// dropped.
pub fn handle_request(
    config: &Config,
    connections: &mut ConnectionMap,
    pending_scrape_responses: &mut PendingScrapeResponseMap,
    access_list_cache: &mut AccessListCache,
    rng: &mut StdRng,
    request_sender: &ConnectedRequestSender,
    local_responses: &mut Vec<(Response, SocketAddr)>,
    connection_valid_until: ValidUntil,
    pending_scrape_valid_until: ValidUntil,
    res_request: Result<Request, RequestParseError>,
    src: SocketAddr,
) {
    let access_list_mode = config.access_list.mode;

    match res_request {
        Ok(Request::Connect(request)) => {
            let connection_id = ConnectionId(rng.gen());

            connections.insert(connection_id, src, connection_valid_until);

            let response = Response::Connect(ConnectResponse {
                connection_id,
                transaction_id: request.transaction_id,
            });

            local_responses.push((response, src))
        }
        Ok(Request::Announce(request)) => {
            if connections.contains(request.connection_id, src) {
                if access_list_cache
                    .load()
                    .allows(access_list_mode, &request.info_hash.0)
                {
                    // Torrent state is sharded by info hash across request
                    // workers; pick the worker owning this hash.
                    let worker_index =
                        RequestWorkerIndex::from_info_hash(config, request.info_hash);

                    request_sender.try_send_to(
                        worker_index,
                        ConnectedRequest::Announce(request),
                        src,
                    );
                } else {
                    let response = Response::Error(ErrorResponse {
                        transaction_id: request.transaction_id,
                        message: "Info hash not allowed".into(),
                    });

                    local_responses.push((response, src))
                }
            }
        }
        Ok(Request::Scrape(request)) => {
            if connections.contains(request.connection_id, src) {
                // Group the scraped info hashes by owning request worker,
                // remembering each hash's original position so the final
                // response can preserve request order.
                let mut requests: AHashIndexMap<RequestWorkerIndex, PendingScrapeRequest> =
                    Default::default();

                let transaction_id = request.transaction_id;

                for (i, info_hash) in request.info_hashes.into_iter().enumerate() {
                    let pending = requests
                        .entry(RequestWorkerIndex::from_info_hash(&config, info_hash))
                        .or_insert_with(|| PendingScrapeRequest {
                            transaction_id,
                            info_hashes: BTreeMap::new(),
                        });

                    pending.info_hashes.insert(i, info_hash);
                }

                // Must be prepared before sending, so that responses arriving
                // quickly still find their reassembly entry.
                pending_scrape_responses.prepare(
                    transaction_id,
                    requests.len(),
                    pending_scrape_valid_until,
                );

                for (request_worker_index, request) in requests {
                    request_sender.try_send_to(
                        request_worker_index,
                        ConnectedRequest::Scrape(request),
                        src,
                    );
                }
            }
        }
        Err(err) => {
            ::log::debug!("Request::from_bytes error: {:?}", err);

            // Parse errors carrying ids can be reported back to the peer,
            // but only for known connections (avoids reflection to spoofed
            // addresses).
            if let RequestParseError::Sendable {
                connection_id,
                transaction_id,
                err,
            } = err
            {
                if connections.contains(connection_id, src) {
                    let response = ErrorResponse {
                        transaction_id,
                        message: err.right_or("Parse error").into(),
                    };

                    local_responses.push((response.into(), src));
                }
            }
        }
    }
}
/// Send out locally produced responses, then responses received from request
/// workers.
///
/// Partial scrape responses are merged into `pending_scrape_responses` and
/// only sent once complete. Updates response/byte counters in
/// `state.statistics` if statistics are enabled.
#[inline]
fn send_responses(
    state: &State,
    config: &Config,
    socket: &mut UdpSocket,
    buffer: &mut [u8],
    response_receiver: &Receiver<(ConnectedResponse, SocketAddr)>,
    pending_scrape_responses: &mut PendingScrapeResponseMap,
    local_responses: Drain<(Response, SocketAddr)>,
) {
    let mut responses_sent: usize = 0;
    let mut bytes_sent: usize = 0;

    for (response, addr) in local_responses {
        send_response(
            config,
            socket,
            buffer,
            &mut responses_sent,
            &mut bytes_sent,
            response,
            addr,
        );
    }

    for (response, addr) in response_receiver.try_iter() {
        let opt_response = match response {
            // None until the last partial scrape response has arrived.
            ConnectedResponse::Scrape(r) => pending_scrape_responses.add_and_get_finished(r),
            ConnectedResponse::AnnounceIpv4(r) => Some(Response::AnnounceIpv4(r)),
            ConnectedResponse::AnnounceIpv6(r) => Some(Response::AnnounceIpv6(r)),
        };

        if let Some(response) = opt_response {
            send_response(
                config,
                socket,
                buffer,
                &mut responses_sent,
                &mut bytes_sent,
                response,
                addr,
            );
        }
    }

    if config.statistics.interval != 0 {
        state
            .statistics
            .responses_sent
            .fetch_add(responses_sent, Ordering::Release);
        state
            .statistics
            .bytes_sent
            .fetch_add(bytes_sent, Ordering::Release);
    }
}
/// Serialize one response into `buffer` and send it to `addr`, converting
/// the destination to match the bound socket's address family first.
///
/// Increments `responses_sent`/`bytes_sent` on success; send and serialize
/// errors are logged and the response dropped.
fn send_response(
    config: &Config,
    socket: &mut UdpSocket,
    buffer: &mut [u8],
    responses_sent: &mut usize,
    bytes_sent: &mut usize,
    response: Response,
    addr: SocketAddr,
) {
    let mut cursor = Cursor::new(buffer);

    let addr = if config.network.address.is_ipv4() {
        if let SocketAddr::V4(addr) = addr {
            SocketAddr::V4(addr)
        } else {
            // NOTE(review): relies on read_requests having normalized
            // IPv4-mapped addresses, so an IPv4-bound socket should never
            // see a V6 peer address here — confirm no other caller exists.
            unreachable!()
        }
    } else {
        // IPv6-bound socket: IPv4 destinations must be sent as
        // IPv4-mapped IPv6 addresses.
        match addr {
            SocketAddr::V4(addr) => {
                let ip = addr.ip().to_ipv6_mapped();

                SocketAddr::V6(SocketAddrV6::new(ip, addr.port(), 0, 0))
            }
            addr => addr,
        }
    };

    match response.write(&mut cursor) {
        Ok(()) => {
            let amt = cursor.position() as usize;

            match socket.send_to(&cursor.get_ref()[..amt], addr) {
                Ok(amt) => {
                    *responses_sent += 1;
                    // Count bytes actually reported sent by the OS.
                    *bytes_sent += amt;
                }
                Err(err) => {
                    ::log::info!("send_to error: {}", err);
                }
            }
        }
        Err(err) => {
            ::log::error!("Response::write error: {:?}", err);
        }
    }
}
/// Create a non-blocking UDP socket bound to the configured address.
///
/// SO_REUSEPORT is enabled so that multiple socket workers can bind to the
/// same address/port, and IPV6_V6ONLY is set when configured. Panics on
/// socket creation or bind failure; failure to set the receive buffer size
/// is only logged.
pub fn create_socket(config: &Config) -> ::std::net::UdpSocket {
    let domain = if config.network.address.is_ipv4() {
        Domain::IPV4
    } else {
        Domain::IPV6
    };

    let socket = Socket::new(domain, Type::DGRAM, Some(Protocol::UDP)).expect("create socket");

    if config.network.only_ipv6 {
        socket.set_only_v6(true).expect("socket: set only ipv6");
    }

    socket.set_reuse_port(true).expect("socket: set reuse port");
    socket
        .set_nonblocking(true)
        .expect("socket: set nonblocking");
    socket
        .bind(&config.network.address.into())
        .unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", config.network.address, err));

    let requested_recv_buffer_size = config.network.socket_recv_buffer_size;

    // Zero means "keep the OS default".
    if requested_recv_buffer_size != 0 {
        if let Err(err) = socket.set_recv_buffer_size(requested_recv_buffer_size) {
            ::log::error!(
                "socket: failed setting recv buffer to {}: {:?}",
                requested_recv_buffer_size,
                err
            );
        }
    }

    socket.into()
}

View file

@ -0,0 +1,62 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use super::common::*;
use crate::config::Config;
/// Gather and print throughput and torrent/peer statistics to stdout.
///
/// Throughput counters are read-and-reset atomically via `fetch_and(0, ..)`,
/// so each call reports activity since the previous call (assumed to be one
/// `config.statistics.interval` ago). Torrent/peer counts are summed from
/// the per-request-worker counters, which are refreshed during torrent
/// cleaning rather than per request.
pub fn gather_and_print_statistics(state: &State, config: &Config) {
    let interval = config.statistics.interval;

    // fetch_and with 0 returns the previous value and zeroes the counter.
    let requests_received: f64 = state
        .statistics
        .requests_received
        .fetch_and(0, Ordering::AcqRel) as f64;
    let responses_sent: f64 = state
        .statistics
        .responses_sent
        .fetch_and(0, Ordering::AcqRel) as f64;
    let bytes_received: f64 = state
        .statistics
        .bytes_received
        .fetch_and(0, Ordering::AcqRel) as f64;
    let bytes_sent: f64 = state.statistics.bytes_sent.fetch_and(0, Ordering::AcqRel) as f64;

    let requests_per_second = requests_received / interval as f64;
    let responses_per_second: f64 = responses_sent / interval as f64;
    let bytes_received_per_second: f64 = bytes_received / interval as f64;
    let bytes_sent_per_second: f64 = bytes_sent / interval as f64;

    // One counter per request worker; sum across the shards.
    let num_torrents_ipv4: usize = sum_atomic_usizes(&state.statistics.torrents_ipv4);
    let num_torrents_ipv6 = sum_atomic_usizes(&state.statistics.torrents_ipv6);
    let num_peers_ipv4 = sum_atomic_usizes(&state.statistics.peers_ipv4);
    let num_peers_ipv6 = sum_atomic_usizes(&state.statistics.peers_ipv6);

    let access_list_len = state.access_list.load().len();

    println!(
        "stats: {:.2} requests/second, {:.2} responses/second",
        requests_per_second, responses_per_second
    );

    println!(
        "bandwidth: {:7.2} Mbit/s in, {:7.2} Mbit/s out",
        bytes_received_per_second * 8.0 / 1_000_000.0,
        bytes_sent_per_second * 8.0 / 1_000_000.0,
    );

    println!(
        "ipv4 torrents: {}, ipv6 torrents: {}",
        num_torrents_ipv4, num_torrents_ipv6,
    );
    println!(
        "ipv4 peers: {}, ipv6 peers: {} (both updated every {} seconds)",
        num_peers_ipv4, num_peers_ipv6, config.cleaning.torrent_cleaning_interval
    );
    println!("access list entries: {}", access_list_len,);

    println!();
}
/// Sum the current values of a slice of atomic counters.
///
/// Each counter is loaded with `Acquire` ordering. Loads happen one by one,
/// so the total is not an atomic snapshot of the whole slice.
fn sum_atomic_usizes(values: &[AtomicUsize]) -> usize {
    let mut total = 0usize;

    for value in values {
        total += value.load(Ordering::Acquire);
    }

    total
}

View file

@ -13,8 +13,9 @@ name = "aquatic_udp_bench"
anyhow = "1" anyhow = "1"
aquatic_cli_helpers = "0.1.0" aquatic_cli_helpers = "0.1.0"
aquatic_udp = "0.1.0" aquatic_udp = "0.1.0"
aquatic_udp_protocol = "0.1.0"
crossbeam-channel = "0.5" crossbeam-channel = "0.5"
indicatif = "0.16.2" indicatif = "0.16"
mimalloc = { version = "0.1", default-features = false } mimalloc = { version = "0.1", default-features = false }
num-format = "0.4" num-format = "0.4"
rand = { version = "0.8", features = ["small_rng"] } rand = { version = "0.8", features = ["small_rng"] }

View file

@ -6,24 +6,22 @@ use indicatif::ProgressIterator;
use rand::Rng; use rand::Rng;
use rand_distr::Pareto; use rand_distr::Pareto;
use aquatic_udp::common::handlers::*;
use aquatic_udp::common::*; use aquatic_udp::common::*;
use aquatic_udp::config::Config; use aquatic_udp_protocol::*;
use crate::common::*; use crate::common::*;
use crate::config::BenchConfig; use crate::config::BenchConfig;
pub fn bench_announce_handler( pub fn bench_announce_handler(
bench_config: &BenchConfig, bench_config: &BenchConfig,
aquatic_config: &Config, request_sender: &Sender<(SocketWorkerIndex, ConnectedRequest, SocketAddr)>,
request_sender: &Sender<(ConnectedRequest, SocketAddr)>,
response_receiver: &Receiver<(ConnectedResponse, SocketAddr)>, response_receiver: &Receiver<(ConnectedResponse, SocketAddr)>,
rng: &mut impl Rng, rng: &mut impl Rng,
info_hashes: &[InfoHash], info_hashes: &[InfoHash],
) -> (usize, Duration) { ) -> (usize, Duration) {
let requests = create_requests(rng, info_hashes, bench_config.num_announce_requests); let requests = create_requests(rng, info_hashes, bench_config.num_announce_requests);
let p = aquatic_config.handlers.max_requests_per_iter * bench_config.num_threads; let p = 10_000 * bench_config.num_threads; // FIXME: adjust to sharded workers
let mut num_responses = 0usize; let mut num_responses = 0usize;
let mut dummy: u16 = rng.gen(); let mut dummy: u16 = rng.gen();
@ -38,11 +36,15 @@ pub fn bench_announce_handler(
for request_chunk in requests.chunks(p) { for request_chunk in requests.chunks(p) {
for (request, src) in request_chunk { for (request, src) in request_chunk {
request_sender request_sender
.send((ConnectedRequest::Announce(request.clone()), *src)) .send((
SocketWorkerIndex(0),
ConnectedRequest::Announce(request.clone()),
*src,
))
.unwrap(); .unwrap();
} }
while let Ok((ConnectedResponse::Announce(r), _)) = response_receiver.try_recv() { while let Ok((ConnectedResponse::AnnounceIpv4(r), _)) = response_receiver.try_recv() {
num_responses += 1; num_responses += 1;
if let Some(last_peer) = r.peers.last() { if let Some(last_peer) = r.peers.last() {
@ -54,7 +56,7 @@ pub fn bench_announce_handler(
let total = bench_config.num_announce_requests * (round + 1); let total = bench_config.num_announce_requests * (round + 1);
while num_responses < total { while num_responses < total {
if let Ok((ConnectedResponse::Announce(r), _)) = response_receiver.recv() { if let Ok((ConnectedResponse::AnnounceIpv4(r), _)) = response_receiver.recv() {
num_responses += 1; num_responses += 1;
if let Some(last_peer) = r.peers.last() { if let Some(last_peer) = r.peers.last() {

View file

@ -7,6 +7,7 @@
//! Scrape: 1 873 545 requests/second, 533.75 ns/request //! Scrape: 1 873 545 requests/second, 533.75 ns/request
//! ``` //! ```
use aquatic_udp::handlers::run_request_worker;
use crossbeam_channel::unbounded; use crossbeam_channel::unbounded;
use num_format::{Locale, ToFormattedString}; use num_format::{Locale, ToFormattedString};
use rand::{rngs::SmallRng, thread_rng, Rng, SeedableRng}; use rand::{rngs::SmallRng, thread_rng, Rng, SeedableRng};
@ -15,8 +16,7 @@ use std::time::Duration;
use aquatic_cli_helpers::run_app_with_cli_and_config; use aquatic_cli_helpers::run_app_with_cli_and_config;
use aquatic_udp::common::*; use aquatic_udp::common::*;
use aquatic_udp::config::Config; use aquatic_udp::config::Config;
use aquatic_udp::mio::common::*; use aquatic_udp_protocol::*;
use aquatic_udp::mio::handlers;
use config::BenchConfig; use config::BenchConfig;
@ -39,20 +39,27 @@ fn main() {
pub fn run(bench_config: BenchConfig) -> ::anyhow::Result<()> { pub fn run(bench_config: BenchConfig) -> ::anyhow::Result<()> {
// Setup common state, spawn request handlers // Setup common state, spawn request handlers
let state = State::default(); let mut aquatic_config = Config::default();
let aquatic_config = Config::default();
aquatic_config.cleaning.torrent_cleaning_interval = 60 * 60 * 24;
let (request_sender, request_receiver) = unbounded(); let (request_sender, request_receiver) = unbounded();
let (response_sender, response_receiver) = unbounded(); let (response_sender, response_receiver) = unbounded();
for _ in 0..bench_config.num_threads { let response_sender = ConnectedResponseSender::new(vec![response_sender]);
let state = state.clone();
{
let config = aquatic_config.clone(); let config = aquatic_config.clone();
let request_receiver = request_receiver.clone(); let state = State::new(config.request_workers);
let response_sender = response_sender.clone();
::std::thread::spawn(move || { ::std::thread::spawn(move || {
handlers::run_request_worker(state, config, request_receiver, response_sender) run_request_worker(
config,
state,
request_receiver,
response_sender,
RequestWorkerIndex(0),
)
}); });
} }
@ -63,7 +70,6 @@ pub fn run(bench_config: BenchConfig) -> ::anyhow::Result<()> {
let a = announce::bench_announce_handler( let a = announce::bench_announce_handler(
&bench_config, &bench_config,
&aquatic_config,
&request_sender, &request_sender,
&response_receiver, &response_receiver,
&mut rng, &mut rng,
@ -72,7 +78,6 @@ pub fn run(bench_config: BenchConfig) -> ::anyhow::Result<()> {
let s = scrape::bench_scrape_handler( let s = scrape::bench_scrape_handler(
&bench_config, &bench_config,
&aquatic_config,
&request_sender, &request_sender,
&response_receiver, &response_receiver,
&mut rng, &mut rng,

View file

@ -6,17 +6,15 @@ use indicatif::ProgressIterator;
use rand::Rng; use rand::Rng;
use rand_distr::Pareto; use rand_distr::Pareto;
use aquatic_udp::common::handlers::*;
use aquatic_udp::common::*; use aquatic_udp::common::*;
use aquatic_udp::config::Config; use aquatic_udp_protocol::*;
use crate::common::*; use crate::common::*;
use crate::config::BenchConfig; use crate::config::BenchConfig;
pub fn bench_scrape_handler( pub fn bench_scrape_handler(
bench_config: &BenchConfig, bench_config: &BenchConfig,
aquatic_config: &Config, request_sender: &Sender<(SocketWorkerIndex, ConnectedRequest, SocketAddr)>,
request_sender: &Sender<(ConnectedRequest, SocketAddr)>,
response_receiver: &Receiver<(ConnectedResponse, SocketAddr)>, response_receiver: &Receiver<(ConnectedResponse, SocketAddr)>,
rng: &mut impl Rng, rng: &mut impl Rng,
info_hashes: &[InfoHash], info_hashes: &[InfoHash],
@ -28,7 +26,7 @@ pub fn bench_scrape_handler(
bench_config.num_hashes_per_scrape_request, bench_config.num_hashes_per_scrape_request,
); );
let p = aquatic_config.handlers.max_requests_per_iter * bench_config.num_threads; let p = 10_000 * bench_config.num_threads; // FIXME: adjust to sharded workers
let mut num_responses = 0usize; let mut num_responses = 0usize;
let mut dummy: i32 = rng.gen(); let mut dummy: i32 = rng.gen();
@ -42,20 +40,25 @@ pub fn bench_scrape_handler(
for round in (0..bench_config.num_rounds).progress_with(pb) { for round in (0..bench_config.num_rounds).progress_with(pb) {
for request_chunk in requests.chunks(p) { for request_chunk in requests.chunks(p) {
for (request, src) in request_chunk { for (request, src) in request_chunk {
let request = ConnectedRequest::Scrape { let request = ConnectedRequest::Scrape(PendingScrapeRequest {
request: request.clone(), transaction_id: request.transaction_id,
original_indices: Vec::new(), info_hashes: request
}; .info_hashes
.clone()
.into_iter()
.enumerate()
.collect(),
});
request_sender.send((request, *src)).unwrap(); request_sender
.send((SocketWorkerIndex(0), request, *src))
.unwrap();
} }
while let Ok((ConnectedResponse::Scrape { response, .. }, _)) = while let Ok((ConnectedResponse::Scrape(response), _)) = response_receiver.try_recv() {
response_receiver.try_recv()
{
num_responses += 1; num_responses += 1;
if let Some(stat) = response.torrent_stats.last() { if let Some(stat) = response.torrent_stats.values().last() {
dummy ^= stat.leechers.0; dummy ^= stat.leechers.0;
} }
} }
@ -64,10 +67,10 @@ pub fn bench_scrape_handler(
let total = bench_config.num_scrape_requests * (round + 1); let total = bench_config.num_scrape_requests * (round + 1);
while num_responses < total { while num_responses < total {
if let Ok((ConnectedResponse::Scrape { response, .. }, _)) = response_receiver.recv() { if let Ok((ConnectedResponse::Scrape(response), _)) = response_receiver.recv() {
num_responses += 1; num_responses += 1;
if let Some(stat) = response.torrent_stats.last() { if let Some(stat) = response.torrent_stats.values().last() {
dummy ^= stat.leechers.0; dummy ^= stat.leechers.0;
} }
} }

View file

@ -17,16 +17,14 @@ anyhow = "1"
aquatic_cli_helpers = "0.1.0" aquatic_cli_helpers = "0.1.0"
aquatic_common = "0.1.0" aquatic_common = "0.1.0"
aquatic_udp_protocol = "0.1.0" aquatic_udp_protocol = "0.1.0"
crossbeam-channel = "0.5" hashbrown = "0.11"
hashbrown = "0.11.2"
mimalloc = { version = "0.1", default-features = false } mimalloc = { version = "0.1", default-features = false }
mio = { version = "0.7", features = ["udp", "os-poll", "os-util"] } mio = { version = "0.8", features = ["net", "os-poll"] }
parking_lot = "0.11"
rand = { version = "0.8", features = ["small_rng"] } rand = { version = "0.8", features = ["small_rng"] }
rand_distr = "0.4" rand_distr = "0.4"
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
socket2 = { version = "0.4.1", features = ["all"] } socket2 = { version = "0.4", features = ["all"] }
[dev-dependencies] [dev-dependencies]
quickcheck = "1.0" quickcheck = "1"
quickcheck_macros = "1.0" quickcheck_macros = "1"

View file

@ -1,153 +1,12 @@
use std::net::SocketAddr;
use std::sync::{atomic::AtomicUsize, Arc}; use std::sync::{atomic::AtomicUsize, Arc};
use aquatic_cli_helpers::LogLevel;
#[cfg(feature = "cpu-pinning")]
use aquatic_common::cpu_pinning::CpuPinningConfig;
use hashbrown::HashMap; use hashbrown::HashMap;
use parking_lot::Mutex;
use serde::{Deserialize, Serialize};
use aquatic_udp_protocol::*; use aquatic_udp_protocol::*;
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
pub struct ThreadId(pub u8); pub struct ThreadId(pub u8);
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct Config {
/// Server address
pub server_address: SocketAddr,
pub log_level: LogLevel,
/// Number of sockets and socket worker threads
///
/// Sockets will bind to one port each, and with
/// multiple_client_ips = true, additionally to one IP each.
pub num_socket_workers: u8,
/// Number of workers generating requests from responses, as well as
/// requests not connected to previous ones.
pub num_request_workers: usize,
/// Run duration (quit and generate report after this many seconds)
pub duration: usize,
pub network: NetworkConfig,
pub handler: HandlerConfig,
#[cfg(feature = "cpu-pinning")]
pub cpu_pinning: CpuPinningConfig,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct NetworkConfig {
/// True means bind to one localhost IP per socket. On macOS, this by
/// default causes all server responses to go to one socket worker.
/// Default option ("true") can cause issues on macOS.
///
/// The point of multiple IPs is to possibly cause a better distribution
/// of requests to servers with SO_REUSEPORT option.
pub multiple_client_ips: bool,
/// Use Ipv6 only
pub ipv6_client: bool,
/// Number of first client port
pub first_port: u16,
/// Socket worker poll timeout in microseconds
pub poll_timeout: u64,
/// Socket worker polling event number
pub poll_event_capacity: usize,
/// Size of socket recv buffer. Use 0 for OS default.
///
/// This setting can have a big impact on dropped packages. It might
/// require changing system defaults. Some examples of commands to set
/// recommended values for different operating systems:
///
/// macOS:
/// $ sudo sysctl net.inet.udp.recvspace=6000000
/// $ sudo sysctl net.inet.udp.maxdgram=500000 # Not necessary, but recommended
/// $ sudo sysctl kern.ipc.maxsockbuf=8388608 # Not necessary, but recommended
///
/// Linux:
/// $ sudo sysctl -w net.core.rmem_max=104857600
/// $ sudo sysctl -w net.core.rmem_default=104857600
pub recv_buffer: usize,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct HandlerConfig {
/// Number of torrents to simulate
pub number_of_torrents: usize,
/// Maximum number of torrents to ask about in scrape requests
pub scrape_max_torrents: usize,
/// Handler: max number of responses to collect for before processing
pub max_responses_per_iter: usize,
/// Probability that a generated request is a connect request as part
/// of sum of the various weight arguments.
pub weight_connect: usize,
/// Probability that a generated request is a announce request, as part
/// of sum of the various weight arguments.
pub weight_announce: usize,
/// Probability that a generated request is a scrape request, as part
/// of sum of the various weight arguments.
pub weight_scrape: usize,
/// Handler: max microseconds to wait for single response from channel
pub channel_timeout: u64,
/// Pareto shape
///
/// Fake peers choose torrents according to Pareto distribution.
pub torrent_selection_pareto_shape: f64,
/// Probability that a generated peer is a seeder
pub peer_seeder_probability: f64,
/// Part of additional request creation calculation, meaning requests
/// which are not dependent on previous responses from server. Higher
/// means more.
pub additional_request_factor: f64,
}
impl Default for Config {
fn default() -> Self {
Self {
server_address: "127.0.0.1:3000".parse().unwrap(),
log_level: LogLevel::Error,
num_socket_workers: 1,
num_request_workers: 1,
duration: 0,
network: NetworkConfig::default(),
handler: HandlerConfig::default(),
#[cfg(feature = "cpu-pinning")]
cpu_pinning: CpuPinningConfig::default_for_load_test(),
}
}
}
impl Default for NetworkConfig {
fn default() -> Self {
Self {
multiple_client_ips: true,
ipv6_client: false,
first_port: 45_000,
poll_timeout: 276,
poll_event_capacity: 2_877,
recv_buffer: 6_000_000,
}
}
}
impl Default for HandlerConfig {
fn default() -> Self {
Self {
number_of_torrents: 10_000,
peer_seeder_probability: 0.25,
scrape_max_torrents: 50,
weight_connect: 0,
weight_announce: 1,
weight_scrape: 1,
additional_request_factor: 0.4,
max_responses_per_iter: 10_000,
channel_timeout: 200,
torrent_selection_pareto_shape: 2.0,
}
}
}
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct TorrentPeer { pub struct TorrentPeer {
pub info_hash: InfoHash, pub info_hash: InfoHash,
@ -171,7 +30,6 @@ pub struct Statistics {
#[derive(Clone)] #[derive(Clone)]
pub struct LoadTestState { pub struct LoadTestState {
pub torrent_peers: Arc<Mutex<TorrentPeerMap>>,
pub info_hashes: Arc<Vec<InfoHash>>, pub info_hashes: Arc<Vec<InfoHash>>,
pub statistics: Arc<Statistics>, pub statistics: Arc<Statistics>,
} }

View file

@ -0,0 +1,123 @@
use std::net::SocketAddr;
use serde::{Deserialize, Serialize};
use aquatic_cli_helpers::LogLevel;
#[cfg(feature = "cpu-pinning")]
use aquatic_common::cpu_pinning::CpuPinningConfig;
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct Config {
/// Server address
///
/// If you want to send IPv4 requests to a IPv4+IPv6 tracker, put an IPv4
/// address here.
pub server_address: SocketAddr,
pub log_level: LogLevel,
pub workers: u8,
/// Run duration (quit and generate report after this many seconds)
pub duration: usize,
pub network: NetworkConfig,
pub handler: HandlerConfig,
#[cfg(feature = "cpu-pinning")]
pub cpu_pinning: CpuPinningConfig,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct NetworkConfig {
/// True means bind to one localhost IP per socket.
///
/// The point of multiple IPs is to cause a better distribution
/// of requests to servers with SO_REUSEPORT option.
///
/// Setting this to true can cause issues on macOS.
pub multiple_client_ipv4s: bool,
/// Number of first client port
pub first_port: u16,
/// Socket worker poll timeout in microseconds
pub poll_timeout: u64,
/// Socket worker polling event number
pub poll_event_capacity: usize,
/// Size of socket recv buffer. Use 0 for OS default.
///
/// This setting can have a big impact on dropped packages. It might
/// require changing system defaults. Some examples of commands to set
/// recommended values for different operating systems:
///
/// macOS:
/// $ sudo sysctl net.inet.udp.recvspace=6000000
/// $ sudo sysctl net.inet.udp.maxdgram=500000 # Not necessary, but recommended
/// $ sudo sysctl kern.ipc.maxsockbuf=8388608 # Not necessary, but recommended
///
/// Linux:
/// $ sudo sysctl -w net.core.rmem_max=104857600
/// $ sudo sysctl -w net.core.rmem_default=104857600
pub recv_buffer: usize,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct HandlerConfig {
/// Number of torrents to simulate
pub number_of_torrents: usize,
/// Maximum number of torrents to ask about in scrape requests
pub scrape_max_torrents: usize,
/// Probability that a generated request is a connect request as part
/// of sum of the various weight arguments.
pub weight_connect: usize,
/// Probability that a generated request is a announce request, as part
/// of sum of the various weight arguments.
pub weight_announce: usize,
/// Probability that a generated request is a scrape request, as part
/// of sum of the various weight arguments.
pub weight_scrape: usize,
/// Pareto shape
///
/// Fake peers choose torrents according to Pareto distribution.
pub torrent_selection_pareto_shape: f64,
/// Probability that a generated peer is a seeder
pub peer_seeder_probability: f64,
}
impl Default for Config {
fn default() -> Self {
Self {
server_address: "127.0.0.1:3000".parse().unwrap(),
log_level: LogLevel::Error,
workers: 1,
duration: 0,
network: NetworkConfig::default(),
handler: HandlerConfig::default(),
#[cfg(feature = "cpu-pinning")]
cpu_pinning: CpuPinningConfig::default_for_load_test(),
}
}
}
impl Default for NetworkConfig {
fn default() -> Self {
Self {
multiple_client_ipv4s: true,
first_port: 45_000,
poll_timeout: 276,
poll_event_capacity: 2_877,
recv_buffer: 6_000_000,
}
}
}
impl Default for HandlerConfig {
fn default() -> Self {
Self {
number_of_torrents: 10_000,
peer_seeder_probability: 0.25,
scrape_max_torrents: 50,
weight_connect: 0,
weight_announce: 5,
weight_scrape: 1,
torrent_selection_pareto_shape: 2.0,
}
}
}

View file

@ -1,9 +1,5 @@
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration;
use std::vec::Drain;
use crossbeam_channel::{Receiver, Sender};
use parking_lot::MutexGuard;
use rand::distributions::WeightedIndex; use rand::distributions::WeightedIndex;
use rand::prelude::*; use rand::prelude::*;
use rand_distr::Pareto; use rand_distr::Pareto;
@ -11,127 +7,10 @@ use rand_distr::Pareto;
use aquatic_udp_protocol::*; use aquatic_udp_protocol::*;
use crate::common::*; use crate::common::*;
use crate::config::Config;
use crate::utils::*; use crate::utils::*;
pub fn run_handler_thread( pub fn process_response(
config: &Config,
state: LoadTestState,
pareto: Pareto<f64>,
request_senders: Vec<Sender<Request>>,
response_receiver: Receiver<(ThreadId, Response)>,
) {
let state = &state;
let mut rng1 = SmallRng::from_rng(thread_rng()).expect("create SmallRng from thread_rng()");
let mut rng2 = SmallRng::from_rng(thread_rng()).expect("create SmallRng from thread_rng()");
let timeout = Duration::from_micros(config.handler.channel_timeout);
let mut responses = Vec::new();
loop {
let mut opt_torrent_peers = None;
// Collect a maximum number of responses. Stop collecting before that
// number is reached if having waited for too long for a request, but
// only if ConnectionMap mutex isn't locked.
for i in 0..config.handler.max_responses_per_iter {
let response = if i == 0 {
match response_receiver.recv() {
Ok(r) => r,
Err(_) => break, // Really shouldn't happen
}
} else {
match response_receiver.recv_timeout(timeout) {
Ok(r) => r,
Err(_) => {
if let Some(guard) = state.torrent_peers.try_lock() {
opt_torrent_peers = Some(guard);
break;
} else {
continue;
}
}
}
};
responses.push(response);
}
let mut torrent_peers: MutexGuard<TorrentPeerMap> =
opt_torrent_peers.unwrap_or_else(|| state.torrent_peers.lock());
let requests = process_responses(
&mut rng1,
pareto,
&state.info_hashes,
config,
&mut torrent_peers,
responses.drain(..),
);
// Somewhat dubious heuristic for deciding how fast to create
// and send additional requests (requests not having anything
// to do with previously sent requests)
let num_additional_to_send = {
let num_additional_requests = requests.iter().map(|v| v.len()).sum::<usize>() as f64;
let num_new_requests_per_socket =
num_additional_requests / config.num_socket_workers as f64;
((num_new_requests_per_socket / 1.2) * config.handler.additional_request_factor)
as usize
+ 10
};
for (channel_index, new_requests) in requests.into_iter().enumerate() {
let channel = &request_senders[channel_index];
for _ in 0..num_additional_to_send {
let request = create_connect_request(generate_transaction_id(&mut rng2));
channel
.send(request)
.expect("send request to channel in handler worker");
}
for request in new_requests.into_iter() {
channel
.send(request)
.expect("send request to channel in handler worker");
}
}
}
}
fn process_responses(
rng: &mut impl Rng,
pareto: Pareto<f64>,
info_hashes: &Arc<Vec<InfoHash>>,
config: &Config,
torrent_peers: &mut TorrentPeerMap,
responses: Drain<(ThreadId, Response)>,
) -> Vec<Vec<Request>> {
let mut new_requests = Vec::with_capacity(config.num_socket_workers as usize);
for _ in 0..config.num_socket_workers {
new_requests.push(Vec::new());
}
for (socket_thread_id, response) in responses.into_iter() {
let opt_request =
process_response(rng, pareto, info_hashes, &config, torrent_peers, response);
if let Some(new_request) = opt_request {
new_requests[socket_thread_id.0 as usize].push(new_request);
}
}
new_requests
}
fn process_response(
rng: &mut impl Rng, rng: &mut impl Rng,
pareto: Pareto<f64>, pareto: Pareto<f64>,
info_hashes: &Arc<Vec<InfoHash>>, info_hashes: &Arc<Vec<InfoHash>>,
@ -165,7 +44,14 @@ fn process_response(
Some(request) Some(request)
} }
Response::Announce(r) => if_torrent_peer_move_and_create_random_request( Response::AnnounceIpv4(r) => if_torrent_peer_move_and_create_random_request(
config,
rng,
info_hashes,
torrent_peers,
r.transaction_id,
),
Response::AnnounceIpv6(r) => if_torrent_peer_move_and_create_random_request(
config, config,
rng, rng,
info_hashes, info_hashes,

View file

@ -5,19 +5,16 @@ use std::time::{Duration, Instant};
#[cfg(feature = "cpu-pinning")] #[cfg(feature = "cpu-pinning")]
use aquatic_common::cpu_pinning::{pin_current_if_configured_to, WorkerIndex}; use aquatic_common::cpu_pinning::{pin_current_if_configured_to, WorkerIndex};
use crossbeam_channel::unbounded;
use hashbrown::HashMap;
use parking_lot::Mutex;
use rand::prelude::*;
use rand_distr::Pareto; use rand_distr::Pareto;
mod common; mod common;
mod config;
mod handler; mod handler;
mod network; mod network;
mod utils; mod utils;
use common::*; use common::*;
use handler::run_handler_thread; use config::Config;
use network::*; use network::*;
use utils::*; use utils::*;
@ -54,91 +51,48 @@ fn run(config: Config) -> ::anyhow::Result<()> {
} }
let state = LoadTestState { let state = LoadTestState {
torrent_peers: Arc::new(Mutex::new(HashMap::new())),
info_hashes: Arc::new(info_hashes), info_hashes: Arc::new(info_hashes),
statistics: Arc::new(Statistics::default()), statistics: Arc::new(Statistics::default()),
}; };
let pareto = Pareto::new(1.0, config.handler.torrent_selection_pareto_shape).unwrap(); let pareto = Pareto::new(1.0, config.handler.torrent_selection_pareto_shape).unwrap();
// Start socket workers // Start workers
let (response_sender, response_receiver) = unbounded(); for i in 0..config.workers {
let mut request_senders = Vec::new();
for i in 0..config.num_socket_workers {
let thread_id = ThreadId(i); let thread_id = ThreadId(i);
let (sender, receiver) = unbounded();
let port = config.network.first_port + (i as u16); let port = config.network.first_port + (i as u16);
let addr = if config.network.multiple_client_ips { let ip = if config.server_address.is_ipv6() {
let ip = if config.network.ipv6_client {
// FIXME: test ipv6
Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1 + i as u16).into()
} else {
Ipv4Addr::new(127, 0, 0, 1 + i).into()
};
SocketAddr::new(ip, port)
} else {
let ip = if config.network.ipv6_client {
Ipv6Addr::LOCALHOST.into() Ipv6Addr::LOCALHOST.into()
} else {
if config.network.multiple_client_ipv4s {
Ipv4Addr::new(127, 0, 0, 1 + i).into()
} else { } else {
Ipv4Addr::LOCALHOST.into() Ipv4Addr::LOCALHOST.into()
}
}; };
SocketAddr::new(ip, port) let addr = SocketAddr::new(ip, port);
};
request_senders.push(sender);
let config = config.clone(); let config = config.clone();
let response_sender = response_sender.clone();
let state = state.clone(); let state = state.clone();
thread::spawn(move || { thread::spawn(move || {
#[cfg(feature = "cpu-pinning")] #[cfg(feature = "cpu-pinning")]
pin_current_if_configured_to( pin_current_if_configured_to(
&config.cpu_pinning, &config.cpu_pinning,
config.num_socket_workers as usize, config.workers as usize,
WorkerIndex::SocketWorker(i as usize), WorkerIndex::SocketWorker(i as usize),
); );
run_socket_thread(state, response_sender, receiver, &config, addr, thread_id) run_worker_thread(state, pareto, &config, addr, thread_id)
}); });
} }
for i in 0..config.num_request_workers {
let config = config.clone();
let state = state.clone();
let request_senders = request_senders.clone();
let response_receiver = response_receiver.clone();
thread::spawn(move || {
#[cfg(feature = "cpu-pinning")]
pin_current_if_configured_to(
&config.cpu_pinning,
config.num_socket_workers as usize,
WorkerIndex::RequestWorker(i as usize),
);
run_handler_thread(&config, state, pareto, request_senders, response_receiver)
});
}
// Bootstrap request cycle by adding a request to each request channel
for sender in request_senders.iter() {
let request = create_connect_request(generate_transaction_id(&mut thread_rng()));
sender
.send(request)
.expect("bootstrap: add initial request to request queue");
}
#[cfg(feature = "cpu-pinning")] #[cfg(feature = "cpu-pinning")]
pin_current_if_configured_to( pin_current_if_configured_to(
&config.cpu_pinning, &config.cpu_pinning,
config.num_socket_workers as usize, config.workers as usize,
WorkerIndex::Other, WorkerIndex::Other,
); );

View file

@ -3,13 +3,15 @@ use std::net::SocketAddr;
use std::sync::atomic::Ordering; use std::sync::atomic::Ordering;
use std::time::Duration; use std::time::Duration;
use crossbeam_channel::{Receiver, Sender};
use mio::{net::UdpSocket, Events, Interest, Poll, Token}; use mio::{net::UdpSocket, Events, Interest, Poll, Token};
use rand::{prelude::SmallRng, thread_rng, SeedableRng};
use rand_distr::Pareto;
use socket2::{Domain, Protocol, Socket, Type}; use socket2::{Domain, Protocol, Socket, Type};
use aquatic_udp_protocol::*; use aquatic_udp_protocol::*;
use crate::common::*; use crate::config::Config;
use crate::{common::*, handler::process_response, utils::*};
const MAX_PACKET_SIZE: usize = 4096; const MAX_PACKET_SIZE: usize = 4096;
@ -45,10 +47,9 @@ pub fn create_socket(config: &Config, addr: SocketAddr) -> ::std::net::UdpSocket
socket.into() socket.into()
} }
pub fn run_socket_thread( pub fn run_worker_thread(
state: LoadTestState, state: LoadTestState,
response_channel_sender: Sender<(ThreadId, Response)>, pareto: Pareto<f64>,
request_receiver: Receiver<Request>,
config: &Config, config: &Config,
addr: SocketAddr, addr: SocketAddr,
thread_id: ThreadId, thread_id: ThreadId,
@ -56,6 +57,9 @@ pub fn run_socket_thread(
let mut socket = UdpSocket::from_std(create_socket(config, addr)); let mut socket = UdpSocket::from_std(create_socket(config, addr));
let mut buffer = [0u8; MAX_PACKET_SIZE]; let mut buffer = [0u8; MAX_PACKET_SIZE];
let mut rng = SmallRng::from_rng(thread_rng()).expect("create SmallRng from thread_rng()");
let mut torrent_peers = TorrentPeerMap::default();
let token = Token(thread_id.0 as usize); let token = Token(thread_id.0 as usize);
let interests = Interest::READABLE; let interests = Interest::READABLE;
let timeout = Duration::from_micros(config.network.poll_timeout); let timeout = Duration::from_micros(config.network.poll_timeout);
@ -68,8 +72,11 @@ pub fn run_socket_thread(
let mut events = Events::with_capacity(config.network.poll_event_capacity); let mut events = Events::with_capacity(config.network.poll_event_capacity);
let mut local_state = SocketWorkerLocalStatistics::default(); let mut statistics = SocketWorkerLocalStatistics::default();
let mut responses = Vec::new();
// Bootstrap request cycle
let initial_request = create_connect_request(generate_transaction_id(&mut thread_rng()));
send_request(&mut socket, &mut buffer, &mut statistics, initial_request);
loop { loop {
poll.poll(&mut events, Some(timeout)) poll.poll(&mut events, Some(timeout))
@ -77,98 +84,73 @@ pub fn run_socket_thread(
for event in events.iter() { for event in events.iter() {
if (event.token() == token) & event.is_readable() { if (event.token() == token) & event.is_readable() {
read_responses( while let Ok(amt) = socket.recv(&mut buffer) {
thread_id,
&socket,
&mut buffer,
&mut local_state,
&mut responses,
);
for r in responses.drain(..) {
response_channel_sender.send(r).unwrap_or_else(|err| {
panic!(
"add response to channel in socket worker {}: {:?}",
thread_id.0, err
)
});
}
poll.registry()
.reregister(&mut socket, token, interests)
.unwrap();
}
send_requests(
&state,
&mut socket,
&mut buffer,
&request_receiver,
&mut local_state,
);
}
send_requests(
&state,
&mut socket,
&mut buffer,
&request_receiver,
&mut local_state,
);
}
}
fn read_responses(
thread_id: ThreadId,
socket: &UdpSocket,
buffer: &mut [u8],
ls: &mut SocketWorkerLocalStatistics,
responses: &mut Vec<(ThreadId, Response)>,
) {
while let Ok(amt) = socket.recv(buffer) {
match Response::from_bytes(&buffer[0..amt]) { match Response::from_bytes(&buffer[0..amt]) {
Ok(response) => { Ok(response) => {
match response { match response {
Response::Announce(ref r) => { Response::AnnounceIpv4(ref r) => {
ls.responses_announce += 1; statistics.responses_announce += 1;
ls.response_peers += r.peers.len(); statistics.response_peers += r.peers.len();
}
Response::AnnounceIpv6(ref r) => {
statistics.responses_announce += 1;
statistics.response_peers += r.peers.len();
} }
Response::Scrape(_) => { Response::Scrape(_) => {
ls.responses_scrape += 1; statistics.responses_scrape += 1;
} }
Response::Connect(_) => { Response::Connect(_) => {
ls.responses_connect += 1; statistics.responses_connect += 1;
} }
Response::Error(_) => { Response::Error(_) => {
ls.responses_error += 1; statistics.responses_error += 1;
} }
} }
responses.push((thread_id, response)) let opt_request = process_response(
&mut rng,
pareto,
&state.info_hashes,
&config,
&mut torrent_peers,
response,
);
if let Some(request) = opt_request {
send_request(&mut socket, &mut buffer, &mut statistics, request);
}
} }
Err(err) => { Err(err) => {
eprintln!("Received invalid response: {:#?}", err); eprintln!("Received invalid response: {:#?}", err);
} }
} }
} }
let additional_request = create_connect_request(generate_transaction_id(&mut rng));
send_request(
&mut socket,
&mut buffer,
&mut statistics,
additional_request,
);
update_shared_statistics(&state, &mut statistics);
}
}
}
} }
fn send_requests( fn send_request(
state: &LoadTestState,
socket: &mut UdpSocket, socket: &mut UdpSocket,
buffer: &mut [u8], buffer: &mut [u8],
receiver: &Receiver<Request>,
statistics: &mut SocketWorkerLocalStatistics, statistics: &mut SocketWorkerLocalStatistics,
request: Request,
) { ) {
let mut cursor = Cursor::new(buffer); let mut cursor = Cursor::new(buffer);
while let Ok(request) = receiver.try_recv() { match request.write(&mut cursor) {
cursor.set_position(0); Ok(()) => {
if let Err(err) = request.write(&mut cursor) {
eprintln!("request_to_bytes err: {}", err);
}
let position = cursor.position() as usize; let position = cursor.position() as usize;
let inner = cursor.get_ref(); let inner = cursor.get_ref();
@ -181,7 +163,13 @@ fn send_requests(
} }
} }
} }
Err(err) => {
eprintln!("request_to_bytes err: {}", err);
}
}
}
fn update_shared_statistics(state: &LoadTestState, statistics: &mut SocketWorkerLocalStatistics) {
state state
.statistics .statistics
.requests .requests

View file

@ -6,6 +6,7 @@ use rand_distr::Pareto;
use aquatic_udp_protocol::*; use aquatic_udp_protocol::*;
use crate::common::*; use crate::common::*;
use crate::config::Config;
pub fn create_torrent_peer( pub fn create_torrent_peer(
config: &Config, config: &Config,

View file

@ -12,5 +12,5 @@ byteorder = "1"
either = "1" either = "1"
[dev-dependencies] [dev-dependencies]
quickcheck = "1.0" quickcheck = "1"
quickcheck_macros = "1.0" quickcheck_macros = "1"

View file

@ -1,10 +1,4 @@
use std::net::IpAddr; use std::net::{Ipv4Addr, Ipv6Addr};
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub enum IpVersion {
IPv4,
IPv6,
}
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct AnnounceInterval(pub i32); pub struct AnnounceInterval(pub i32);
@ -37,20 +31,15 @@ pub struct PeerId(pub [u8; 20]);
pub struct PeerKey(pub u32); pub struct PeerKey(pub u32);
#[derive(Hash, PartialEq, Eq, Clone, Debug)] #[derive(Hash, PartialEq, Eq, Clone, Debug)]
pub struct ResponsePeer { pub struct ResponsePeerIpv4 {
pub ip_address: IpAddr, pub ip_address: Ipv4Addr,
pub port: Port, pub port: Port,
} }
#[cfg(test)] #[derive(Hash, PartialEq, Eq, Clone, Debug)]
impl quickcheck::Arbitrary for IpVersion { pub struct ResponsePeerIpv6 {
fn arbitrary(g: &mut quickcheck::Gen) -> Self { pub ip_address: Ipv6Addr,
if bool::arbitrary(g) { pub port: Port,
IpVersion::IPv4
} else {
IpVersion::IPv6
}
}
} }
#[cfg(test)] #[cfg(test)]
@ -80,11 +69,21 @@ impl quickcheck::Arbitrary for PeerId {
} }
#[cfg(test)] #[cfg(test)]
impl quickcheck::Arbitrary for ResponsePeer { impl quickcheck::Arbitrary for ResponsePeerIpv4 {
fn arbitrary(g: &mut quickcheck::Gen) -> Self { fn arbitrary(g: &mut quickcheck::Gen) -> Self {
Self { Self {
ip_address: ::std::net::IpAddr::arbitrary(g), ip_address: quickcheck::Arbitrary::arbitrary(g),
port: Port(u16::arbitrary(g)), port: Port(u16::arbitrary(g).into()),
}
}
}
#[cfg(test)]
impl quickcheck::Arbitrary for ResponsePeerIpv6 {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
Self {
ip_address: quickcheck::Arbitrary::arbitrary(g),
port: Port(u16::arbitrary(g).into()),
} }
} }
} }

View file

@ -1,7 +1,7 @@
use std::borrow::Cow; use std::borrow::Cow;
use std::convert::TryInto; use std::convert::TryInto;
use std::io::{self, Cursor, Write}; use std::io::{self, Cursor, Write};
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::net::{Ipv4Addr, Ipv6Addr};
use byteorder::{NetworkEndian, ReadBytesExt, WriteBytesExt}; use byteorder::{NetworkEndian, ReadBytesExt, WriteBytesExt};
@ -21,12 +21,21 @@ pub struct ConnectResponse {
} }
#[derive(PartialEq, Eq, Clone, Debug)] #[derive(PartialEq, Eq, Clone, Debug)]
pub struct AnnounceResponse { pub struct AnnounceResponseIpv4 {
pub transaction_id: TransactionId, pub transaction_id: TransactionId,
pub announce_interval: AnnounceInterval, pub announce_interval: AnnounceInterval,
pub leechers: NumberOfPeers, pub leechers: NumberOfPeers,
pub seeders: NumberOfPeers, pub seeders: NumberOfPeers,
pub peers: Vec<ResponsePeer>, pub peers: Vec<ResponsePeerIpv4>,
}
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct AnnounceResponseIpv6 {
pub transaction_id: TransactionId,
pub announce_interval: AnnounceInterval,
pub leechers: NumberOfPeers,
pub seeders: NumberOfPeers,
pub peers: Vec<ResponsePeerIpv6>,
} }
#[derive(PartialEq, Eq, Clone, Debug)] #[derive(PartialEq, Eq, Clone, Debug)]
@ -44,7 +53,8 @@ pub struct ErrorResponse {
#[derive(PartialEq, Eq, Clone, Debug)] #[derive(PartialEq, Eq, Clone, Debug)]
pub enum Response { pub enum Response {
Connect(ConnectResponse), Connect(ConnectResponse),
Announce(AnnounceResponse), AnnounceIpv4(AnnounceResponseIpv4),
AnnounceIpv6(AnnounceResponseIpv6),
Scrape(ScrapeResponse), Scrape(ScrapeResponse),
Error(ErrorResponse), Error(ErrorResponse),
} }
@ -55,9 +65,15 @@ impl From<ConnectResponse> for Response {
} }
} }
impl From<AnnounceResponse> for Response { impl From<AnnounceResponseIpv4> for Response {
fn from(r: AnnounceResponse) -> Self { fn from(r: AnnounceResponseIpv4) -> Self {
Self::Announce(r) Self::AnnounceIpv4(r)
}
}
impl From<AnnounceResponseIpv6> for Response {
fn from(r: AnnounceResponseIpv6) -> Self {
Self::AnnounceIpv6(r)
} }
} }
@ -81,44 +97,25 @@ impl Response {
/// addresses. Clients seem not to support it very well, but due to a lack /// addresses. Clients seem not to support it very well, but due to a lack
/// of alternative solutions, it is implemented here. /// of alternative solutions, it is implemented here.
#[inline] #[inline]
pub fn write(self, bytes: &mut impl Write, ip_version: IpVersion) -> Result<(), io::Error> { pub fn write(self, bytes: &mut impl Write) -> Result<(), io::Error> {
match self { match self {
Response::Connect(r) => { Response::Connect(r) => {
bytes.write_i32::<NetworkEndian>(0)?; bytes.write_i32::<NetworkEndian>(0)?;
bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?; bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;
bytes.write_i64::<NetworkEndian>(r.connection_id.0)?; bytes.write_i64::<NetworkEndian>(r.connection_id.0)?;
} }
Response::Announce(r) => { Response::AnnounceIpv4(r) => {
if ip_version == IpVersion::IPv4 {
bytes.write_i32::<NetworkEndian>(1)?; bytes.write_i32::<NetworkEndian>(1)?;
bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?; bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;
bytes.write_i32::<NetworkEndian>(r.announce_interval.0)?; bytes.write_i32::<NetworkEndian>(r.announce_interval.0)?;
bytes.write_i32::<NetworkEndian>(r.leechers.0)?; bytes.write_i32::<NetworkEndian>(r.leechers.0)?;
bytes.write_i32::<NetworkEndian>(r.seeders.0)?; bytes.write_i32::<NetworkEndian>(r.seeders.0)?;
// Silently ignore peers with wrong IP version
for peer in r.peers { for peer in r.peers {
if let IpAddr::V4(ip) = peer.ip_address { bytes.write_all(&peer.ip_address.octets())?;
bytes.write_all(&ip.octets())?;
bytes.write_u16::<NetworkEndian>(peer.port.0)?; bytes.write_u16::<NetworkEndian>(peer.port.0)?;
} }
} }
} else {
bytes.write_i32::<NetworkEndian>(4)?;
bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;
bytes.write_i32::<NetworkEndian>(r.announce_interval.0)?;
bytes.write_i32::<NetworkEndian>(r.leechers.0)?;
bytes.write_i32::<NetworkEndian>(r.seeders.0)?;
// Silently ignore peers with wrong IP version
for peer in r.peers {
if let IpAddr::V6(ip) = peer.ip_address {
bytes.write_all(&ip.octets())?;
bytes.write_u16::<NetworkEndian>(peer.port.0)?;
}
}
}
}
Response::Scrape(r) => { Response::Scrape(r) => {
bytes.write_i32::<NetworkEndian>(2)?; bytes.write_i32::<NetworkEndian>(2)?;
bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?; bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;
@ -135,6 +132,18 @@ impl Response {
bytes.write_all(r.message.as_bytes())?; bytes.write_all(r.message.as_bytes())?;
} }
Response::AnnounceIpv6(r) => {
bytes.write_i32::<NetworkEndian>(4)?;
bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;
bytes.write_i32::<NetworkEndian>(r.announce_interval.0)?;
bytes.write_i32::<NetworkEndian>(r.leechers.0)?;
bytes.write_i32::<NetworkEndian>(r.seeders.0)?;
for peer in r.peers {
bytes.write_all(&peer.ip_address.octets())?;
bytes.write_u16::<NetworkEndian>(peer.port.0)?;
}
}
} }
Ok(()) Ok(())
@ -171,17 +180,17 @@ impl Response {
.chunks_exact(6) .chunks_exact(6)
.map(|chunk| { .map(|chunk| {
let ip_bytes: [u8; 4] = (&chunk[..4]).try_into().unwrap(); let ip_bytes: [u8; 4] = (&chunk[..4]).try_into().unwrap();
let ip_address = IpAddr::V4(Ipv4Addr::from(ip_bytes)); let ip_address = Ipv4Addr::from(ip_bytes);
let port = (&chunk[4..]).read_u16::<NetworkEndian>().unwrap(); let port = (&chunk[4..]).read_u16::<NetworkEndian>().unwrap();
ResponsePeer { ResponsePeerIpv4 {
ip_address, ip_address,
port: Port(port), port: Port(port),
} }
}) })
.collect(); .collect();
Ok((AnnounceResponse { Ok((AnnounceResponseIpv4 {
transaction_id: TransactionId(transaction_id), transaction_id: TransactionId(transaction_id),
announce_interval: AnnounceInterval(announce_interval), announce_interval: AnnounceInterval(announce_interval),
leechers: NumberOfPeers(leechers), leechers: NumberOfPeers(leechers),
@ -244,17 +253,17 @@ impl Response {
.chunks_exact(18) .chunks_exact(18)
.map(|chunk| { .map(|chunk| {
let ip_bytes: [u8; 16] = (&chunk[..16]).try_into().unwrap(); let ip_bytes: [u8; 16] = (&chunk[..16]).try_into().unwrap();
let ip_address = IpAddr::V6(Ipv6Addr::from(ip_bytes)); let ip_address = Ipv6Addr::from(ip_bytes);
let port = (&chunk[16..]).read_u16::<NetworkEndian>().unwrap(); let port = (&chunk[16..]).read_u16::<NetworkEndian>().unwrap();
ResponsePeer { ResponsePeerIpv6 {
ip_address, ip_address,
port: Port(port), port: Port(port),
} }
}) })
.collect(); .collect();
Ok((AnnounceResponse { Ok((AnnounceResponseIpv6 {
transaction_id: TransactionId(transaction_id), transaction_id: TransactionId(transaction_id),
announce_interval: AnnounceInterval(announce_interval), announce_interval: AnnounceInterval(announce_interval),
leechers: NumberOfPeers(leechers), leechers: NumberOfPeers(leechers),
@ -297,10 +306,26 @@ mod tests {
} }
} }
impl quickcheck::Arbitrary for AnnounceResponse { impl quickcheck::Arbitrary for AnnounceResponseIpv4 {
fn arbitrary(g: &mut quickcheck::Gen) -> Self { fn arbitrary(g: &mut quickcheck::Gen) -> Self {
let peers = (0..u8::arbitrary(g)) let peers = (0..u8::arbitrary(g))
.map(|_| ResponsePeer::arbitrary(g)) .map(|_| ResponsePeerIpv4::arbitrary(g))
.collect();
Self {
transaction_id: TransactionId(i32::arbitrary(g)),
announce_interval: AnnounceInterval(i32::arbitrary(g)),
leechers: NumberOfPeers(i32::arbitrary(g)),
seeders: NumberOfPeers(i32::arbitrary(g)),
peers,
}
}
}
impl quickcheck::Arbitrary for AnnounceResponseIpv6 {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
let peers = (0..u8::arbitrary(g))
.map(|_| ResponsePeerIpv6::arbitrary(g))
.collect(); .collect();
Self { Self {
@ -326,10 +351,10 @@ mod tests {
} }
} }
fn same_after_conversion(response: Response, ip_version: IpVersion) -> bool { fn same_after_conversion(response: Response) -> bool {
let mut buf = Vec::new(); let mut buf = Vec::new();
response.clone().write(&mut buf, ip_version).unwrap(); response.clone().write(&mut buf).unwrap();
let r2 = Response::from_bytes(&buf[..]).unwrap(); let r2 = Response::from_bytes(&buf[..]).unwrap();
let success = response == r2; let success = response == r2;
@ -343,24 +368,21 @@ mod tests {
#[quickcheck] #[quickcheck]
fn test_connect_response_convert_identity(response: ConnectResponse) -> bool { fn test_connect_response_convert_identity(response: ConnectResponse) -> bool {
same_after_conversion(response.into(), IpVersion::IPv4) same_after_conversion(response.into())
} }
#[quickcheck] #[quickcheck]
fn test_announce_response_convert_identity(data: (AnnounceResponse, IpVersion)) -> bool { fn test_announce_response_ipv4_convert_identity(response: AnnounceResponseIpv4) -> bool {
let mut r = data.0; same_after_conversion(response.into())
if data.1 == IpVersion::IPv4 {
r.peers.retain(|peer| peer.ip_address.is_ipv4());
} else {
r.peers.retain(|peer| peer.ip_address.is_ipv6());
} }
same_after_conversion(r.into(), data.1) #[quickcheck]
fn test_announce_response_ipv6_convert_identity(response: AnnounceResponseIpv6) -> bool {
same_after_conversion(response.into())
} }
#[quickcheck] #[quickcheck]
fn test_scrape_response_convert_identity(response: ScrapeResponse) -> bool { fn test_scrape_response_convert_identity(response: ScrapeResponse) -> bool {
same_after_conversion(response.into(), IpVersion::IPv4) same_after_conversion(response.into())
} }
} }

View file

@ -28,7 +28,7 @@ aquatic_common = "0.1.0"
aquatic_ws_protocol = "0.1.0" aquatic_ws_protocol = "0.1.0"
cfg-if = "1" cfg-if = "1"
either = "1" either = "1"
hashbrown = { version = "0.11.2", features = ["serde"] } hashbrown = { version = "0.11", features = ["serde"] }
log = "0.4" log = "0.4"
mimalloc = { version = "0.1", default-features = false } mimalloc = { version = "0.1", default-features = false }
privdrop = "0.5" privdrop = "0.5"
@ -41,10 +41,10 @@ tungstenite = "0.15"
# mio # mio
crossbeam-channel = { version = "0.5", optional = true } crossbeam-channel = { version = "0.5", optional = true }
histogram = { version = "0.6", optional = true } histogram = { version = "0.6", optional = true }
mio = { version = "0.7", features = ["tcp", "os-poll", "os-util"], optional = true } mio = { version = "0.8", features = ["net", "os-poll"], optional = true }
native-tls = { version = "0.2", optional = true } native-tls = { version = "0.2", optional = true }
parking_lot = { version = "0.11", optional = true } parking_lot = { version = "0.11", optional = true }
socket2 = { version = "0.4.1", features = ["all"], optional = true } socket2 = { version = "0.4", features = ["all"], optional = true }
# glommio # glommio
async-tungstenite = { version = "0.15", optional = true } async-tungstenite = { version = "0.15", optional = true }
@ -55,5 +55,5 @@ glommio = { git = "https://github.com/DataDog/glommio.git", rev = "4e6b14772da2f
rustls-pemfile = { version = "0.2", optional = true } rustls-pemfile = { version = "0.2", optional = true }
[dev-dependencies] [dev-dependencies]
quickcheck = "1.0" quickcheck = "1"
quickcheck_macros = "1.0" quickcheck_macros = "1"

View file

@ -21,7 +21,7 @@ aquatic_ws_protocol = "0.1.0"
futures = "0.3" futures = "0.3"
futures-rustls = "0.22" futures-rustls = "0.22"
glommio = { git = "https://github.com/DataDog/glommio.git", rev = "4e6b14772da2f4325271fbcf12d24cf91ed466e5" } glommio = { git = "https://github.com/DataDog/glommio.git", rev = "4e6b14772da2f4325271fbcf12d24cf91ed466e5" }
hashbrown = { version = "0.11.2", features = ["serde"] } hashbrown = { version = "0.11", features = ["serde"] }
mimalloc = { version = "0.1", default-features = false } mimalloc = { version = "0.1", default-features = false }
rand = { version = "0.8", features = ["small_rng"] } rand = { version = "0.8", features = ["small_rng"] }
rand_distr = "0.4" rand_distr = "0.4"
@ -31,5 +31,5 @@ serde_json = "1"
tungstenite = "0.15" tungstenite = "0.15"
[dev-dependencies] [dev-dependencies]
quickcheck = "1.0" quickcheck = "1"
quickcheck_macros = "1.0" quickcheck_macros = "1"

View file

@ -18,13 +18,13 @@ harness = false
[dependencies] [dependencies]
anyhow = "1" anyhow = "1"
hashbrown = { version = "0.11.2", features = ["serde"] } hashbrown = { version = "0.11", features = ["serde"] }
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
serde_json = "1" serde_json = "1"
simd-json = { version = "0.4.7", features = ["allow-non-simd"] } simd-json = { version = "0.4", features = ["allow-non-simd"] }
tungstenite = "0.15" tungstenite = "0.15"
[dev-dependencies] [dev-dependencies]
criterion = "0.3" criterion = "0.3"
quickcheck = "1.0" quickcheck = "1"
quickcheck_macros = "1.0" quickcheck_macros = "1"

View file

@ -2,12 +2,4 @@
. ./scripts/env-native-cpu-without-avx-512 . ./scripts/env-native-cpu-without-avx-512
if [ "$1" != "mio" ] && [ "$1" != "glommio" ]; then cargo run --release --bin aquatic_udp -- $@
echo "Usage: $0 [mio|glommio] [ARGS]"
else
if [ "$1" = "mio" ]; then
cargo run --release --bin aquatic_udp -- "${@:2}"
else
cargo run --release --features "with-glommio" --no-default-features --bin aquatic_udp -- "${@:2}"
fi
fi

3
scripts/watch-threads.sh Executable file
View file

@ -0,0 +1,3 @@
#!/bin/sh
watch -d -n 0.5 ps H -o euser,pid,tid,comm,%mem,rss,%cpu,psr -p `pgrep aquatic`