Merge pull request #160 from greatest-ape/udp-thingbuf2

udp improvements; update dependencies
This commit is contained in:
Joakim Frostegård 2023-12-10 12:31:22 +01:00 committed by GitHub
commit c7997d5aed
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
43 changed files with 1879 additions and 2125 deletions

View file

@ -1,10 +0,0 @@
# Not used by Github action, but can be used to run test locally:
# 1. docker build -t aquatic ./path/to/Dockerfile
# 2. docker run aquatic
# 3. On failure, run `docker rmi aquatic -f` and go back to step 1
FROM rust:bullseye
COPY entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

View file

@ -68,7 +68,10 @@ tls_private_key_path = './key.pk8'
" > tls.toml
./target/debug/aquatic http -c tls.toml > "$HOME/tls.log" 2>&1 &
echo "[network]
echo "
log_level = 'trace'
[network]
address = '127.0.0.1:3000'" > udp.toml
./target/debug/aquatic udp -c udp.toml > "$HOME/udp.log" 2>&1 &

View file

@ -63,7 +63,9 @@ jobs:
- name: Setup Rust dependency caching
uses: Swatinem/rust-cache@v2
- name: Run tests
run: cargo test --verbose --workspace --profile "test-fast"
run: cargo test --verbose --profile "test-fast" --workspace
- name: Run tests (aquatic_udp with io_uring)
run: cargo test --verbose --profile "test-fast" -p aquatic_udp --features "io-uring"
test-file-transfers:
runs-on: ubuntu-latest

View file

@ -14,6 +14,14 @@
* Add support for reporting peer client information
#### Changed
* Remove support for unbounded worker channels
* Add backpressure in socket workers. They will postpone reading from the
socket if sending a request to a swarm worker failed
* Reuse allocations in swarm response channel
* Remove config key `network.poll_event_capacity`
### aquatic_http
#### Added

418
Cargo.lock generated
View file

@ -96,7 +96,7 @@ dependencies = [
"duplicate",
"git-testament",
"glommio",
"hashbrown 0.14.2",
"hashbrown 0.14.3",
"hex",
"hwloc",
"indexmap 2.1.0",
@ -159,7 +159,7 @@ dependencies = [
"futures-lite",
"futures-rustls",
"glommio",
"hashbrown 0.14.2",
"hashbrown 0.14.3",
"log",
"mimalloc",
"quickcheck",
@ -199,6 +199,7 @@ dependencies = [
"quickcheck",
"regex",
"serde",
"zerocopy",
]
[[package]]
@ -235,7 +236,7 @@ dependencies = [
"constant_time_eq",
"crossbeam-channel",
"getrandom",
"hashbrown 0.14.2",
"hashbrown 0.14.3",
"hdrhistogram",
"hex",
"io-uring",
@ -255,28 +256,11 @@ dependencies = [
"slab",
"socket2 0.5.5",
"tempfile",
"thingbuf",
"time",
"tinytemplate",
]
[[package]]
name = "aquatic_udp_bench"
version = "0.8.0"
dependencies = [
"anyhow",
"aquatic_common",
"aquatic_toml_config",
"aquatic_udp",
"aquatic_udp_protocol",
"crossbeam-channel",
"indicatif",
"mimalloc",
"num-format",
"rand",
"rand_distr",
"serde",
]
[[package]]
name = "aquatic_udp_load_test"
version = "0.8.0"
@ -285,7 +269,7 @@ dependencies = [
"aquatic_common",
"aquatic_toml_config",
"aquatic_udp_protocol",
"hashbrown 0.14.2",
"hashbrown 0.14.3",
"mimalloc",
"mio",
"quickcheck",
@ -303,8 +287,10 @@ dependencies = [
"aquatic_peer_id",
"byteorder",
"either",
"pretty_assertions",
"quickcheck",
"quickcheck_macros",
"zerocopy",
]
[[package]]
@ -323,7 +309,7 @@ dependencies = [
"futures-lite",
"futures-rustls",
"glommio",
"hashbrown 0.14.2",
"hashbrown 0.14.3",
"httparse",
"indexmap 2.1.0",
"log",
@ -375,7 +361,7 @@ version = "0.8.0"
dependencies = [
"anyhow",
"criterion 0.5.1",
"hashbrown 0.14.2",
"hashbrown 0.14.3",
"quickcheck",
"quickcheck_macros",
"serde",
@ -447,12 +433,6 @@ dependencies = [
"rustc-demangle",
]
[[package]]
name = "base64"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
[[package]]
name = "base64"
version = "0.21.5"
@ -618,18 +598,18 @@ dependencies = [
[[package]]
name = "clap"
version = "4.4.8"
version = "4.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2275f18819641850fa26c89acc84d465c1bf91ce57bc2748b28c420473352f64"
checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2"
dependencies = [
"clap_builder",
]
[[package]]
name = "clap_builder"
version = "4.4.8"
version = "4.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07cdf1b148b25c1e1f7a42225e30a0d99a615cd4637eae7365548dd4529b95bc"
checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb"
dependencies = [
"anstyle",
"clap_lex 0.6.0",
@ -652,11 +632,10 @@ checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1"
[[package]]
name = "colored"
version = "2.0.4"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6"
checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8"
dependencies = [
"is-terminal",
"lazy_static",
"windows-sys 0.48.0",
]
@ -684,19 +663,6 @@ dependencies = [
"cache-padded",
]
[[package]]
name = "console"
version = "0.15.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8"
dependencies = [
"encode_unicode",
"lazy_static",
"libc",
"unicode-width",
"windows-sys 0.45.0",
]
[[package]]
name = "constant_time_eq"
version = "0.3.0"
@ -756,7 +722,7 @@ dependencies = [
"anes",
"cast",
"ciborium",
"clap 4.4.8",
"clap 4.4.11",
"criterion-plot",
"is-terminal",
"itertools",
@ -862,19 +828,25 @@ dependencies = [
[[package]]
name = "data-encoding"
version = "2.4.0"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308"
checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5"
[[package]]
name = "deranged"
version = "0.3.9"
version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3"
checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc"
dependencies = [
"powerfmt",
]
[[package]]
name = "diff"
version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8"
[[package]]
name = "digest"
version = "0.10.7"
@ -913,12 +885,6 @@ version = "1.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1056f553da426e9c025a662efa48b52e62e0a3a7648aa2d15aeaaf7f0d329357"
[[package]]
name = "encode_unicode"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
[[package]]
name = "endian-type"
version = "0.1.2"
@ -954,12 +920,12 @@ dependencies = [
[[package]]
name = "errno"
version = "0.3.7"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f258a7194e7f7c2a7837a8913aeab7fd8c383457034fa20ce4dd3dcb813e8eb8"
checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245"
dependencies = [
"libc",
"windows-sys 0.48.0",
"windows-sys 0.52.0",
]
[[package]]
@ -1027,9 +993,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "form_urlencoded"
version = "1.2.0"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652"
checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
dependencies = [
"percent-encoding",
]
@ -1173,9 +1139,9 @@ dependencies = [
[[package]]
name = "gimli"
version = "0.28.0"
version = "0.28.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0"
checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
[[package]]
name = "git-testament"
@ -1267,9 +1233,9 @@ dependencies = [
[[package]]
name = "hashbrown"
version = "0.14.2"
version = "0.14.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156"
checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"
dependencies = [
"ahash 0.8.6",
"allocator-api2",
@ -1278,11 +1244,11 @@ dependencies = [
[[package]]
name = "hdrhistogram"
version = "7.5.3"
version = "7.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5b38e5c02b7c7be48c8dc5217c4f1634af2ea221caae2e024bffc7a7651c691"
checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d"
dependencies = [
"base64 0.13.1",
"base64",
"byteorder",
"crossbeam-channel",
"flate2",
@ -1330,9 +1296,9 @@ dependencies = [
[[package]]
name = "http-body"
version = "0.4.5"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1"
checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
dependencies = [
"bytes",
"http",
@ -1391,9 +1357,9 @@ dependencies = [
[[package]]
name = "idna"
version = "0.4.0"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c"
checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6"
dependencies = [
"unicode-bidi",
"unicode-normalization",
@ -1416,20 +1382,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f"
dependencies = [
"equivalent",
"hashbrown 0.14.2",
]
[[package]]
name = "indicatif"
version = "0.17.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb28741c9db9a713d93deb3bb9515c20788cef5815265bee4980e87bde7e0f25"
dependencies = [
"console",
"instant",
"number_prefix",
"portable-atomic",
"unicode-width",
"hashbrown 0.14.3",
]
[[package]]
@ -1488,15 +1441,15 @@ dependencies = [
[[package]]
name = "itoa"
version = "1.0.9"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"
[[package]]
name = "js-sys"
version = "0.3.65"
version = "0.3.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8"
checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca"
dependencies = [
"wasm-bindgen",
]
@ -1605,9 +1558,9 @@ dependencies = [
[[package]]
name = "linux-raw-sys"
version = "0.4.11"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829"
checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456"
[[package]]
name = "lock_api"
@ -1693,7 +1646,7 @@ version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5"
dependencies = [
"base64 0.21.5",
"base64",
"hyper",
"indexmap 1.9.3",
"ipnet",
@ -1760,9 +1713,9 @@ dependencies = [
[[package]]
name = "mio"
version = "0.8.9"
version = "0.8.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0"
checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09"
dependencies = [
"libc",
"log",
@ -1895,12 +1848,6 @@ dependencies = [
"libc",
]
[[package]]
name = "number_prefix"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3"
[[package]]
name = "object"
version = "0.32.1"
@ -1912,9 +1859,9 @@ dependencies = [
[[package]]
name = "once_cell"
version = "1.18.0"
version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
[[package]]
name = "oorandom"
@ -1950,10 +1897,33 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae"
[[package]]
name = "percent-encoding"
version = "2.3.0"
name = "parking_lot"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"
checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
dependencies = [
"lock_api",
"parking_lot_core",
]
[[package]]
name = "parking_lot_core"
version = "0.9.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e"
dependencies = [
"cfg-if",
"libc",
"redox_syscall",
"smallvec",
"windows-targets 0.48.5",
]
[[package]]
name = "percent-encoding"
version = "2.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
[[package]]
name = "pin-project"
@ -2023,9 +1993,9 @@ dependencies = [
[[package]]
name = "portable-atomic"
version = "1.5.1"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3bccab0e7fd7cc19f820a1c8c91720af652d0c88dc9664dd72aef2614f04af3b"
checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0"
[[package]]
name = "powerfmt"
@ -2039,6 +2009,16 @@ version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
[[package]]
name = "pretty_assertions"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66"
dependencies = [
"diff",
"yansi",
]
[[package]]
name = "privdrop"
version = "0.5.4"
@ -2075,9 +2055,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
version = "1.0.69"
version = "1.0.70"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da"
checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b"
dependencies = [
"unicode-ident",
]
@ -2248,9 +2228,9 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f"
[[package]]
name = "ring"
version = "0.17.5"
version = "0.17.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b"
checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74"
dependencies = [
"cc",
"getrandom",
@ -2277,22 +2257,22 @@ checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"
[[package]]
name = "rustix"
version = "0.38.24"
version = "0.38.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ad981d6c340a49cdc40a1028d9c6084ec7e9fa33fcb839cab656a267071e234"
checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316"
dependencies = [
"bitflags 2.4.1",
"errno 0.3.7",
"errno 0.3.8",
"libc",
"linux-raw-sys",
"windows-sys 0.48.0",
"windows-sys 0.52.0",
]
[[package]]
name = "rustls"
version = "0.21.9"
version = "0.21.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9"
checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba"
dependencies = [
"log",
"ring",
@ -2306,7 +2286,7 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c"
dependencies = [
"base64 0.21.5",
"base64",
]
[[package]]
@ -2327,9 +2307,9 @@ checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4"
[[package]]
name = "ryu"
version = "1.0.15"
version = "1.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741"
checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c"
[[package]]
name = "same-file"
@ -2364,9 +2344,9 @@ dependencies = [
[[package]]
name = "serde"
version = "1.0.192"
version = "1.0.193"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001"
checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89"
dependencies = [
"serde_derive",
]
@ -2392,9 +2372,9 @@ dependencies = [
[[package]]
name = "serde_derive"
version = "1.0.192"
version = "1.0.193"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1"
checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3"
dependencies = [
"proc-macro2",
"quote",
@ -2465,14 +2445,14 @@ checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a"
[[package]]
name = "simple_logger"
version = "4.2.0"
version = "4.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2230cd5c29b815c9b699fb610b49a5ed65588f3509d9f0108be3a885da629333"
checksum = "da0ca6504625ee1aa5fda33913d2005eab98c7a42dd85f116ecce3ff54c9d3ef"
dependencies = [
"colored",
"log",
"time",
"windows-sys 0.42.0",
"windows-sys 0.48.0",
]
[[package]]
@ -2498,9 +2478,9 @@ dependencies = [
[[package]]
name = "slotmap"
version = "1.0.6"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1e08e261d0e8f5c43123b7adf3e4ca1690d655377ac93a03b2c9d3e98de1342"
checksum = "dbff4acf519f630b3a3ddcfaea6c06b42174d9a44bc70c620e9ed1649d58b82a"
dependencies = [
"version_check",
]
@ -2609,6 +2589,16 @@ version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
[[package]]
name = "thingbuf"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4706f1bfb859af03f099ada2de3cea3e515843c2d3e93b7893f16d94a37f9415"
dependencies = [
"parking_lot",
"pin-project",
]
[[package]]
name = "thiserror"
version = "1.0.50"
@ -2687,9 +2677,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
version = "1.34.0"
version = "1.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9"
checksum = "841d45b238a16291a4e1584e61820b8ae57d696cc5015c459c229ccc6990cc1c"
dependencies = [
"backtrace",
"libc",
@ -2747,9 +2737,9 @@ dependencies = [
[[package]]
name = "try-lock"
version = "0.2.4"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed"
checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
[[package]]
name = "tungstenite"
@ -2778,9 +2768,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
[[package]]
name = "unicode-bidi"
version = "0.3.13"
version = "0.3.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460"
checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416"
[[package]]
name = "unicode-ident"
@ -2797,12 +2787,6 @@ dependencies = [
"tinyvec",
]
[[package]]
name = "unicode-width"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85"
[[package]]
name = "untrusted"
version = "0.9.0"
@ -2811,9 +2795,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
[[package]]
name = "url"
version = "2.4.1"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5"
checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633"
dependencies = [
"form_urlencoded",
"idna",
@ -2883,9 +2867,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasm-bindgen"
version = "0.2.88"
version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce"
checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e"
dependencies = [
"cfg-if",
"wasm-bindgen-macro",
@ -2893,9 +2877,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.88"
version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217"
checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826"
dependencies = [
"bumpalo",
"log",
@ -2908,9 +2892,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.88"
version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2"
checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
@ -2918,9 +2902,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.88"
version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907"
checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283"
dependencies = [
"proc-macro2",
"quote",
@ -2931,15 +2915,15 @@ dependencies = [
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.88"
version = "0.2.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b"
checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f"
[[package]]
name = "web-sys"
version = "0.3.65"
version = "0.3.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5db499c5f66323272151db0e666cd34f78617522fb0c1604d31a27c50c206a85"
checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f"
dependencies = [
"js-sys",
"wasm-bindgen",
@ -2988,30 +2972,6 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "windows-sys"
version = "0.42.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7"
dependencies = [
"windows_aarch64_gnullvm 0.42.2",
"windows_aarch64_msvc 0.42.2",
"windows_i686_gnu 0.42.2",
"windows_i686_msvc 0.42.2",
"windows_x86_64_gnu 0.42.2",
"windows_x86_64_gnullvm 0.42.2",
"windows_x86_64_msvc 0.42.2",
]
[[package]]
name = "windows-sys"
version = "0.45.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
dependencies = [
"windows-targets 0.42.2",
]
[[package]]
name = "windows-sys"
version = "0.48.0"
@ -3022,18 +2982,12 @@ dependencies = [
]
[[package]]
name = "windows-targets"
version = "0.42.2"
name = "windows-sys"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
"windows_aarch64_gnullvm 0.42.2",
"windows_aarch64_msvc 0.42.2",
"windows_i686_gnu 0.42.2",
"windows_i686_msvc 0.42.2",
"windows_x86_64_gnu 0.42.2",
"windows_x86_64_gnullvm 0.42.2",
"windows_x86_64_msvc 0.42.2",
"windows-targets 0.52.0",
]
[[package]]
@ -3052,10 +3006,19 @@ dependencies = [
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.2"
name = "windows-targets"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8"
checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd"
dependencies = [
"windows_aarch64_gnullvm 0.52.0",
"windows_aarch64_msvc 0.52.0",
"windows_i686_gnu 0.52.0",
"windows_i686_msvc 0.52.0",
"windows_x86_64_gnu 0.52.0",
"windows_x86_64_gnullvm 0.52.0",
"windows_x86_64_msvc 0.52.0",
]
[[package]]
name = "windows_aarch64_gnullvm"
@ -3064,10 +3027,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
[[package]]
name = "windows_aarch64_msvc"
version = "0.42.2"
name = "windows_aarch64_gnullvm"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43"
checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea"
[[package]]
name = "windows_aarch64_msvc"
@ -3076,10 +3039,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
[[package]]
name = "windows_i686_gnu"
version = "0.42.2"
name = "windows_aarch64_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f"
checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef"
[[package]]
name = "windows_i686_gnu"
@ -3088,10 +3051,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
[[package]]
name = "windows_i686_msvc"
version = "0.42.2"
name = "windows_i686_gnu"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060"
checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313"
[[package]]
name = "windows_i686_msvc"
@ -3100,10 +3063,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
[[package]]
name = "windows_x86_64_gnu"
version = "0.42.2"
name = "windows_i686_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36"
checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a"
[[package]]
name = "windows_x86_64_gnu"
@ -3112,10 +3075,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.2"
name = "windows_x86_64_gnu"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3"
checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd"
[[package]]
name = "windows_x86_64_gnullvm"
@ -3124,10 +3087,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
[[package]]
name = "windows_x86_64_msvc"
version = "0.42.2"
name = "windows_x86_64_gnullvm"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0"
checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e"
[[package]]
name = "windows_x86_64_msvc"
@ -3136,19 +3099,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
[[package]]
name = "zerocopy"
version = "0.7.26"
name = "windows_x86_64_msvc"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e97e415490559a91254a2979b4829267a57d2fcd741a98eee8b722fb57289aa0"
checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04"
[[package]]
name = "yansi"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"
[[package]]
name = "zerocopy"
version = "0.7.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "306dca4455518f1f31635ec308b6b3e4eb1b11758cefafc782827d0aa7acb5c7"
dependencies = [
"byteorder",
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.7.26"
version = "0.7.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd7e48ccf166952882ca8bd778a43502c64f33bf94c12ebe2a7f08e5a0f6689f"
checksum = "be912bf68235a88fbefd1b73415cb218405958d1655b2ece9035a19920bdf6ba"
dependencies = [
"proc-macro2",
"quote",

View file

@ -9,7 +9,6 @@ members = [
"crates/toml_config",
"crates/toml_config_derive",
"crates/udp",
"crates/udp_bench",
"crates/udp_load_test",
"crates/udp_protocol",
"crates/ws",

View file

@ -2,8 +2,9 @@
## High priority
* aquatic_ws
* Validate SDP data
* CI transfer test
* add udp with io_uring
* add HTTP without TLS
* http
* panic sentinel not working

View file

@ -20,4 +20,5 @@ compact_str = "0.7"
hex = "0.4"
regex = "1"
serde = { version = "1", features = ["derive"] }
quickcheck = { version = "1", optional = true }
quickcheck = { version = "1", optional = true }
zerocopy = { version = "0.7", features = ["derive"] }

View file

@ -3,8 +3,24 @@ use std::{borrow::Cow, fmt::Display, sync::OnceLock};
use compact_str::{format_compact, CompactString};
use regex::bytes::Regex;
use serde::{Deserialize, Serialize};
use zerocopy::{AsBytes, FromBytes, FromZeroes};
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[derive(
Debug,
Clone,
Copy,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Serialize,
Deserialize,
AsBytes,
FromBytes,
FromZeroes,
)]
#[repr(transparent)]
pub struct PeerId(pub [u8; 20]);
impl PeerId {

View file

@ -19,9 +19,12 @@ name = "aquatic_udp"
[features]
default = ["prometheus"]
cpu-pinning = ["aquatic_common/hwloc"]
# Export prometheus metrics
prometheus = ["metrics", "metrics-util", "metrics-exporter-prometheus"]
# Experimental io_uring support (Linux 6.0 or later required)
io-uring = ["dep:io-uring"]
# Experimental CPU pinning support
cpu-pinning = ["aquatic_common/hwloc"]
[dependencies]
aquatic_common.workspace = true
@ -38,12 +41,8 @@ getrandom = "0.2"
hashbrown = { version = "0.14", default-features = false }
hdrhistogram = "7"
hex = "0.4"
io-uring = { version = "0.6", optional = true }
libc = "0.2"
log = "0.4"
metrics = { version = "0.21", optional = true }
metrics-util = { version = "0.15", optional = true }
metrics-exporter-prometheus = { version = "0.12", optional = true, default-features = false, features = ["http-listener"] }
mimalloc = { version = "0.1", default-features = false }
mio = { version = "0.8", features = ["net", "os-poll"] }
num-format = "0.4"
@ -52,9 +51,18 @@ serde = { version = "1", features = ["derive"] }
signal-hook = { version = "0.3" }
slab = "0.4"
socket2 = { version = "0.5", features = ["all"] }
thingbuf = "0.1"
time = { version = "0.3", features = ["formatting"] }
tinytemplate = "1"
# prometheus feature
metrics = { version = "0.21", optional = true }
metrics-util = { version = "0.15", optional = true }
metrics-exporter-prometheus = { version = "0.12", optional = true, default-features = false, features = ["http-listener"] }
# io-uring feature
io-uring = { version = "0.6", optional = true }
[dev-dependencies]
hex = "0.4"
tempfile = "3"

View file

@ -1,6 +1,8 @@
use std::borrow::Cow;
use std::collections::BTreeMap;
use std::hash::Hash;
use std::net::{Ipv4Addr, Ipv6Addr};
use std::io::Write;
use std::net::{SocketAddr, SocketAddrV4};
use std::sync::atomic::AtomicUsize;
use std::sync::Arc;
@ -10,11 +12,56 @@ use aquatic_common::access_list::AccessListArcSwap;
use aquatic_common::CanonicalSocketAddr;
use aquatic_udp_protocol::*;
use hdrhistogram::Histogram;
use thingbuf::mpsc::blocking::SendRef;
use crate::config::Config;
pub const BUFFER_SIZE: usize = 8192;
/// A tracker response that either owns its payload or borrows it from a
/// reused channel slot (`ConnectedResponseWithAddr`), so responses received
/// from swarm workers can be serialized without cloning.
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum CowResponse<'a> {
    Connect(Cow<'a, ConnectResponse>),
    AnnounceIpv4(Cow<'a, AnnounceResponse<Ipv4AddrBytes>>),
    AnnounceIpv6(Cow<'a, AnnounceResponse<Ipv6AddrBytes>>),
    Scrape(Cow<'a, ScrapeResponse>),
    Error(Cow<'a, ErrorResponse>),
}
impl From<Response> for CowResponse<'_> {
fn from(value: Response) -> Self {
match value {
Response::AnnounceIpv4(r) => Self::AnnounceIpv4(Cow::Owned(r)),
Response::AnnounceIpv6(r) => Self::AnnounceIpv6(Cow::Owned(r)),
Response::Connect(r) => Self::Connect(Cow::Owned(r)),
Response::Scrape(r) => Self::Scrape(Cow::Owned(r)),
Response::Error(r) => Self::Error(Cow::Owned(r)),
}
}
}
impl<'a> CowResponse<'a> {
pub fn into_owned(self) -> Response {
match self {
CowResponse::Connect(r) => Response::Connect(r.into_owned()),
CowResponse::AnnounceIpv4(r) => Response::AnnounceIpv4(r.into_owned()),
CowResponse::AnnounceIpv6(r) => Response::AnnounceIpv6(r.into_owned()),
CowResponse::Scrape(r) => Response::Scrape(r.into_owned()),
CowResponse::Error(r) => Response::Error(r.into_owned()),
}
}
#[inline]
pub fn write(&self, bytes: &mut impl Write) -> Result<(), ::std::io::Error> {
match self {
Self::Connect(r) => r.write(bytes),
Self::AnnounceIpv4(r) => r.write(bytes),
Self::AnnounceIpv6(r) => r.write(bytes),
Self::Scrape(r) => r.write(bytes),
Self::Error(r) => r.write(bytes),
}
}
}
#[derive(Debug)]
pub struct PendingScrapeRequest {
pub slab_key: usize,
@ -35,11 +82,48 @@ pub enum ConnectedRequest {
#[derive(Debug)]
pub enum ConnectedResponse {
AnnounceIpv4(AnnounceResponse<Ipv4Addr>),
AnnounceIpv6(AnnounceResponse<Ipv6Addr>),
AnnounceIpv4(AnnounceResponse<Ipv4AddrBytes>),
AnnounceIpv6(AnnounceResponse<Ipv6AddrBytes>),
Scrape(PendingScrapeResponse),
}
/// Discriminant telling a receiver which payload field of
/// `ConnectedResponseWithAddr` is the active one.
pub enum ConnectedResponseKind {
    AnnounceIpv4,
    AnnounceIpv6,
    Scrape,
}
/// Channel slot carrying one swarm-worker response plus its destination
/// address. All payload fields are always present so their allocations can
/// be reused across messages; `kind` indicates which one is valid.
pub struct ConnectedResponseWithAddr {
    // Which of the payload fields below holds the active response
    pub kind: ConnectedResponseKind,
    pub announce_ipv4: AnnounceResponse<Ipv4AddrBytes>,
    pub announce_ipv6: AnnounceResponse<Ipv6AddrBytes>,
    pub scrape: PendingScrapeResponse,
    pub addr: CanonicalSocketAddr,
}
/// Allocation recycler for the thingbuf response channel.
pub struct Recycler;

impl thingbuf::Recycle<ConnectedResponseWithAddr> for Recycler {
    /// Create a fresh, empty channel slot (placeholder kind, empty
    /// responses, zeroed IPv4 address).
    fn new_element(&self) -> ConnectedResponseWithAddr {
        ConnectedResponseWithAddr {
            kind: ConnectedResponseKind::AnnounceIpv4,
            announce_ipv4: AnnounceResponse::empty(),
            announce_ipv6: AnnounceResponse::empty(),
            scrape: PendingScrapeResponse {
                slab_key: 0,
                torrent_stats: Default::default(),
            },
            addr: CanonicalSocketAddr::new(SocketAddr::V4(SocketAddrV4::new(0.into(), 0))),
        }
    }

    /// Reset a returned slot for reuse: clear the peer lists and scrape
    /// stats (keeping their allocations) and zero the address.
    fn recycle(&self, element: &mut ConnectedResponseWithAddr) {
        element.announce_ipv4.peers.clear();
        element.announce_ipv6.peers.clear();
        element.scrape.torrent_stats.clear();
        element.addr = CanonicalSocketAddr::new(SocketAddr::V4(SocketAddrV4::new(0.into(), 0)));
    }
}
/// Index of a socket worker, used to route swarm-worker responses back to
/// the socket worker that originated the request.
#[derive(Clone, Copy, Debug)]
pub struct SocketWorkerIndex(pub usize);
@ -65,17 +149,19 @@ impl ConnectedRequestSender {
Self { index, senders }
}
pub fn any_full(&self) -> bool {
self.senders.iter().any(|sender| sender.is_full())
}
pub fn try_send_to(
&self,
index: SwarmWorkerIndex,
request: ConnectedRequest,
addr: CanonicalSocketAddr,
) {
) -> Result<(), (SwarmWorkerIndex, ConnectedRequest, CanonicalSocketAddr)> {
match self.senders[index.0].try_send((self.index, request, addr)) {
Ok(()) => {}
Err(TrySendError::Full(_)) => {
::log::error!("Request channel {} is full, dropping request. Try increasing number of swarm workers or raising config.worker_channel_size.", index.0)
}
Ok(()) => Ok(()),
Err(TrySendError::Full(r)) => Err((index, r.1, r.2)),
Err(TrySendError::Disconnected(_)) => {
panic!("Request channel {} is disconnected", index.0);
}
@ -84,32 +170,34 @@ impl ConnectedRequestSender {
}
pub struct ConnectedResponseSender {
senders: Vec<Sender<(ConnectedResponse, CanonicalSocketAddr)>>,
senders: Vec<thingbuf::mpsc::blocking::Sender<ConnectedResponseWithAddr, Recycler>>,
}
impl ConnectedResponseSender {
pub fn new(senders: Vec<Sender<(ConnectedResponse, CanonicalSocketAddr)>>) -> Self {
pub fn new(
senders: Vec<thingbuf::mpsc::blocking::Sender<ConnectedResponseWithAddr, Recycler>>,
) -> Self {
Self { senders }
}
pub fn try_send_to(
pub fn try_send_ref_to(
&self,
index: SocketWorkerIndex,
response: ConnectedResponse,
addr: CanonicalSocketAddr,
) {
match self.senders[index.0].try_send((response, addr)) {
Ok(()) => {}
Err(TrySendError::Full(_)) => {
::log::error!("Response channel {} is full, dropping response. Try increasing number of socket workers or raising config.worker_channel_size.", index.0)
}
Err(TrySendError::Disconnected(_)) => {
panic!("Response channel {} is disconnected", index.0);
}
}
) -> Result<SendRef<ConnectedResponseWithAddr>, thingbuf::mpsc::errors::TrySendError> {
self.senders[index.0].try_send_ref()
}
pub fn send_ref_to(
&self,
index: SocketWorkerIndex,
) -> Result<SendRef<ConnectedResponseWithAddr>, thingbuf::mpsc::errors::Closed> {
self.senders[index.0].send_ref()
}
}
pub type ConnectedResponseReceiver =
thingbuf::mpsc::blocking::Receiver<ConnectedResponseWithAddr, Recycler>;
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub enum PeerStatus {
Seeding,
@ -125,7 +213,7 @@ impl PeerStatus {
pub fn from_event_and_bytes_left(event: AnnounceEvent, bytes_left: NumberOfBytes) -> Self {
if event == AnnounceEvent::Stopped {
Self::Stopped
} else if bytes_left.0 == 0 {
} else if bytes_left.0.get() == 0 {
Self::Seeding
} else {
Self::Leeching
@ -207,17 +295,17 @@ mod tests {
let f = PeerStatus::from_event_and_bytes_left;
assert_eq!(Stopped, f(AnnounceEvent::Stopped, NumberOfBytes(0)));
assert_eq!(Stopped, f(AnnounceEvent::Stopped, NumberOfBytes(1)));
assert_eq!(Stopped, f(AnnounceEvent::Stopped, NumberOfBytes::new(0)));
assert_eq!(Stopped, f(AnnounceEvent::Stopped, NumberOfBytes::new(1)));
assert_eq!(Seeding, f(AnnounceEvent::Started, NumberOfBytes(0)));
assert_eq!(Leeching, f(AnnounceEvent::Started, NumberOfBytes(1)));
assert_eq!(Seeding, f(AnnounceEvent::Started, NumberOfBytes::new(0)));
assert_eq!(Leeching, f(AnnounceEvent::Started, NumberOfBytes::new(1)));
assert_eq!(Seeding, f(AnnounceEvent::Completed, NumberOfBytes(0)));
assert_eq!(Leeching, f(AnnounceEvent::Completed, NumberOfBytes(1)));
assert_eq!(Seeding, f(AnnounceEvent::Completed, NumberOfBytes::new(0)));
assert_eq!(Leeching, f(AnnounceEvent::Completed, NumberOfBytes::new(1)));
assert_eq!(Seeding, f(AnnounceEvent::None, NumberOfBytes(0)));
assert_eq!(Leeching, f(AnnounceEvent::None, NumberOfBytes(1)));
assert_eq!(Seeding, f(AnnounceEvent::None, NumberOfBytes::new(0)));
assert_eq!(Leeching, f(AnnounceEvent::None, NumberOfBytes::new(1)));
}
// Assumes that announce response with maximum amount of ipv6 peers will
@ -229,17 +317,19 @@ mod tests {
let config = Config::default();
let peers = ::std::iter::repeat(ResponsePeer {
ip_address: Ipv6Addr::new(1, 1, 1, 1, 1, 1, 1, 1),
port: Port(1),
ip_address: Ipv6AddrBytes(Ipv6Addr::new(1, 1, 1, 1, 1, 1, 1, 1).octets()),
port: Port::new(1),
})
.take(config.protocol.max_response_peers)
.collect();
let response = Response::AnnounceIpv6(AnnounceResponse {
transaction_id: TransactionId(1),
announce_interval: AnnounceInterval(1),
seeders: NumberOfPeers(1),
leechers: NumberOfPeers(1),
fixed: AnnounceResponseFixedData {
transaction_id: TransactionId::new(1),
announce_interval: AnnounceInterval::new(1),
seeders: NumberOfPeers::new(1),
leechers: NumberOfPeers::new(1),
},
peers,
});

View file

@ -26,8 +26,7 @@ pub struct Config {
pub swarm_workers: usize,
pub log_level: LogLevel,
/// Maximum number of items in each channel passing requests/responses
/// between workers. A value of zero means that the channels will be of
/// unbounded size.
/// between workers. A value of zero is no longer allowed.
pub worker_channel_size: usize,
/// How long to block waiting for requests in swarm workers.
///
@ -59,7 +58,7 @@ impl Default for Config {
socket_workers: 1,
swarm_workers: 1,
log_level: LogLevel::Error,
worker_channel_size: 0,
worker_channel_size: 1024 * 16,
request_channel_recv_timeout_ms: 100,
network: NetworkConfig::default(),
protocol: ProtocolConfig::default(),
@ -99,8 +98,6 @@ pub struct NetworkConfig {
/// $ sudo sysctl -w net.core.rmem_max=104857600
/// $ sudo sysctl -w net.core.rmem_default=104857600
pub socket_recv_buffer_size: usize,
/// Poll event capacity (mio backend only)
pub poll_event_capacity: usize,
/// Poll timeout in milliseconds (mio backend only)
pub poll_timeout_ms: u64,
/// Number of ring entries (io_uring backend only)
@ -133,7 +130,6 @@ impl Default for NetworkConfig {
address: SocketAddr::from(([0, 0, 0, 0], 3000)),
only_ipv6: false,
socket_recv_buffer_size: 4096 * 128,
poll_event_capacity: 4096,
poll_timeout_ms: 50,
#[cfg(feature = "io-uring")]
ring_size: 1024,

View file

@ -18,7 +18,8 @@ use aquatic_common::privileges::PrivilegeDropper;
use aquatic_common::{PanicSentinelWatcher, ServerStartInstant};
use common::{
ConnectedRequestSender, ConnectedResponseSender, SocketWorkerIndex, State, SwarmWorkerIndex,
ConnectedRequestSender, ConnectedResponseSender, Recycler, SocketWorkerIndex, State,
SwarmWorkerIndex,
};
use config::Config;
use workers::socket::ConnectionValidator;
@ -58,11 +59,8 @@ pub fn run(config: Config) -> ::anyhow::Result<()> {
}
for i in 0..config.socket_workers {
let (response_sender, response_receiver) = if config.worker_channel_size == 0 {
unbounded()
} else {
bounded(config.worker_channel_size)
};
let (response_sender, response_receiver) =
thingbuf::mpsc::blocking::with_recycle(config.worker_channel_size, Recycler);
response_senders.push(response_sender);
response_receivers.insert(i, response_receiver);

View file

@ -1,10 +1,10 @@
use std::borrow::Cow;
use std::io::{Cursor, ErrorKind};
use std::sync::atomic::Ordering;
use std::time::{Duration, Instant};
use aquatic_common::access_list::AccessListCache;
use aquatic_common::ServerStartInstant;
use crossbeam_channel::Receiver;
use mio::net::UdpSocket;
use mio::{Events, Interest, Poll, Token};
@ -21,17 +21,32 @@ use super::storage::PendingScrapeResponseSlab;
use super::validator::ConnectionValidator;
use super::{create_socket, EXTRA_PACKET_SIZE_IPV4, EXTRA_PACKET_SIZE_IPV6};
enum HandleRequestError {
RequestChannelFull(Vec<(SwarmWorkerIndex, ConnectedRequest, CanonicalSocketAddr)>),
}
#[derive(Clone, Copy, Debug)]
enum PollMode {
Regular,
SkipPolling,
SkipReceiving,
}
pub struct SocketWorker {
config: Config,
shared_state: State,
request_sender: ConnectedRequestSender,
response_receiver: Receiver<(ConnectedResponse, CanonicalSocketAddr)>,
response_receiver: ConnectedResponseReceiver,
access_list_cache: AccessListCache,
validator: ConnectionValidator,
server_start_instant: ServerStartInstant,
pending_scrape_responses: PendingScrapeResponseSlab,
socket: UdpSocket,
opt_resend_buffer: Option<Vec<(Response, CanonicalSocketAddr)>>,
buffer: [u8; BUFFER_SIZE],
polling_mode: PollMode,
/// Storage for requests that couldn't be sent to swarm worker because channel was full
pending_requests: Vec<(SwarmWorkerIndex, ConnectedRequest, CanonicalSocketAddr)>,
}
impl SocketWorker {
@ -42,12 +57,13 @@ impl SocketWorker {
validator: ConnectionValidator,
server_start_instant: ServerStartInstant,
request_sender: ConnectedRequestSender,
response_receiver: Receiver<(ConnectedResponse, CanonicalSocketAddr)>,
response_receiver: ConnectedResponseReceiver,
priv_dropper: PrivilegeDropper,
) {
let socket =
UdpSocket::from_std(create_socket(&config, priv_dropper).expect("create socket"));
let access_list_cache = create_access_list_cache(&shared_state.access_list);
let opt_resend_buffer = (config.network.resend_buffer_max_len > 0).then_some(Vec::new());
let mut worker = Self {
config,
@ -59,18 +75,17 @@ impl SocketWorker {
access_list_cache,
pending_scrape_responses: Default::default(),
socket,
opt_resend_buffer,
buffer: [0; BUFFER_SIZE],
polling_mode: PollMode::Regular,
pending_requests: Default::default(),
};
worker.run_inner();
}
pub fn run_inner(&mut self) {
let mut local_responses = Vec::new();
let mut opt_resend_buffer =
(self.config.network.resend_buffer_max_len > 0).then_some(Vec::new());
let mut events = Events::with_capacity(self.config.network.poll_event_capacity);
let mut events = Events::with_capacity(1);
let mut poll = Poll::new().expect("create poll");
poll.registry()
@ -91,17 +106,33 @@ impl SocketWorker {
let mut iter_counter = 0usize;
loop {
poll.poll(&mut events, Some(poll_timeout))
.expect("failed polling");
match self.polling_mode {
PollMode::Regular => {
poll.poll(&mut events, Some(poll_timeout))
.expect("failed polling");
for event in events.iter() {
if event.is_readable() {
self.read_and_handle_requests(&mut local_responses, pending_scrape_valid_until);
for event in events.iter() {
if event.is_readable() {
self.read_and_handle_requests(pending_scrape_valid_until);
}
}
}
PollMode::SkipPolling => {
self.polling_mode = PollMode::Regular;
// Continue reading from socket without polling, since
// reading was previously cancelled
self.read_and_handle_requests(pending_scrape_valid_until);
}
PollMode::SkipReceiving => {
::log::info!("Postponing receiving requests because swarm worker channel is full. This means that the OS will be relied on to buffer incoming packets. To prevent this, raise config.worker_channel_size.");
self.polling_mode = PollMode::SkipPolling;
}
}
// If resend buffer is enabled, send any responses in it
if let Some(resend_buffer) = opt_resend_buffer.as_mut() {
if let Some(resend_buffer) = self.opt_resend_buffer.as_mut() {
for (response, addr) in resend_buffer.drain(..) {
Self::send_response(
&self.config,
@ -109,46 +140,23 @@ impl SocketWorker {
&mut self.socket,
&mut self.buffer,
&mut None,
response,
response.into(),
addr,
);
}
}
// Send any connect and error responses generated by this socket worker
for (response, addr) in local_responses.drain(..) {
Self::send_response(
&self.config,
&self.shared_state,
&mut self.socket,
&mut self.buffer,
&mut opt_resend_buffer,
response,
addr,
);
}
// Check channel for any responses generated by swarm workers
for (response, addr) in self.response_receiver.try_iter() {
let opt_response = match response {
ConnectedResponse::Scrape(r) => self
.pending_scrape_responses
.add_and_get_finished(r)
.map(Response::Scrape),
ConnectedResponse::AnnounceIpv4(r) => Some(Response::AnnounceIpv4(r)),
ConnectedResponse::AnnounceIpv6(r) => Some(Response::AnnounceIpv6(r)),
};
self.handle_swarm_worker_responses();
if let Some(response) = opt_response {
Self::send_response(
&self.config,
&self.shared_state,
&mut self.socket,
&mut self.buffer,
&mut opt_resend_buffer,
response,
addr,
);
// Try sending pending requests
while let Some((index, request, addr)) = self.pending_requests.pop() {
if let Err(r) = self.request_sender.try_send_to(index, request, addr) {
self.pending_requests.push(r);
self.polling_mode = PollMode::SkipReceiving;
break;
}
}
@ -174,11 +182,7 @@ impl SocketWorker {
}
}
fn read_and_handle_requests(
&mut self,
local_responses: &mut Vec<(Response, CanonicalSocketAddr)>,
pending_scrape_valid_until: ValidUntil,
) {
fn read_and_handle_requests(&mut self, pending_scrape_valid_until: ValidUntil) {
let mut requests_received_ipv4: usize = 0;
let mut requests_received_ipv6: usize = 0;
let mut bytes_received_ipv4: usize = 0;
@ -194,18 +198,19 @@ impl SocketWorker {
}
let src = CanonicalSocketAddr::new(src);
let request_parsable = match Request::from_bytes(
&self.buffer[..bytes_read],
self.config.protocol.max_scrape_torrents,
) {
Ok(request) => {
self.handle_request(
local_responses,
pending_scrape_valid_until,
request,
src,
);
if let Err(HandleRequestError::RequestChannelFull(failed_requests)) =
self.handle_request(pending_scrape_valid_until, request, src)
{
self.pending_requests.extend(failed_requests.into_iter());
self.polling_mode = PollMode::SkipReceiving;
break;
}
true
}
@ -221,10 +226,18 @@ impl SocketWorker {
if self.validator.connection_id_valid(src, connection_id) {
let response = ErrorResponse {
transaction_id,
message: err.right_or("Parse error").into(),
message: err.into(),
};
local_responses.push((response.into(), src));
Self::send_response(
&self.config,
&self.shared_state,
&mut self.socket,
&mut self.buffer,
&mut self.opt_resend_buffer,
CowResponse::Error(Cow::Owned(response)),
src,
);
}
}
@ -276,23 +289,32 @@ impl SocketWorker {
fn handle_request(
&mut self,
local_responses: &mut Vec<(Response, CanonicalSocketAddr)>,
pending_scrape_valid_until: ValidUntil,
request: Request,
src: CanonicalSocketAddr,
) {
) -> Result<(), HandleRequestError> {
let access_list_mode = self.config.access_list.mode;
match request {
Request::Connect(request) => {
let connection_id = self.validator.create_connection_id(src);
let response = Response::Connect(ConnectResponse {
let response = ConnectResponse {
connection_id,
transaction_id: request.transaction_id,
});
};
local_responses.push((response, src))
Self::send_response(
&self.config,
&self.shared_state,
&mut self.socket,
&mut self.buffer,
&mut self.opt_resend_buffer,
CowResponse::Connect(Cow::Owned(response)),
src,
);
Ok(())
}
Request::Announce(request) => {
if self
@ -307,19 +329,31 @@ impl SocketWorker {
let worker_index =
SwarmWorkerIndex::from_info_hash(&self.config, request.info_hash);
self.request_sender.try_send_to(
worker_index,
ConnectedRequest::Announce(request),
src,
);
self.request_sender
.try_send_to(worker_index, ConnectedRequest::Announce(request), src)
.map_err(|request| {
HandleRequestError::RequestChannelFull(vec![request])
})
} else {
let response = Response::Error(ErrorResponse {
let response = ErrorResponse {
transaction_id: request.transaction_id,
message: "Info hash not allowed".into(),
});
};
local_responses.push((response, src))
Self::send_response(
&self.config,
&self.shared_state,
&mut self.socket,
&mut self.buffer,
&mut self.opt_resend_buffer,
CowResponse::Error(Cow::Owned(response)),
src,
);
Ok(())
}
} else {
Ok(())
}
}
Request::Scrape(request) => {
@ -333,36 +367,87 @@ impl SocketWorker {
pending_scrape_valid_until,
);
let mut failed = Vec::new();
for (swarm_worker_index, request) in split_requests {
self.request_sender.try_send_to(
if let Err(request) = self.request_sender.try_send_to(
swarm_worker_index,
ConnectedRequest::Scrape(request),
src,
);
) {
failed.push(request);
}
}
if failed.is_empty() {
Ok(())
} else {
Err(HandleRequestError::RequestChannelFull(failed))
}
} else {
Ok(())
}
}
}
}
fn handle_swarm_worker_responses(&mut self) {
loop {
let recv_ref = if let Ok(recv_ref) = self.response_receiver.try_recv_ref() {
recv_ref
} else {
break;
};
let response = match recv_ref.kind {
ConnectedResponseKind::Scrape => {
if let Some(r) = self
.pending_scrape_responses
.add_and_get_finished(&recv_ref.scrape)
{
CowResponse::Scrape(Cow::Owned(r))
} else {
continue;
}
}
ConnectedResponseKind::AnnounceIpv4 => {
CowResponse::AnnounceIpv4(Cow::Borrowed(&recv_ref.announce_ipv4))
}
ConnectedResponseKind::AnnounceIpv6 => {
CowResponse::AnnounceIpv6(Cow::Borrowed(&recv_ref.announce_ipv6))
}
};
Self::send_response(
&self.config,
&self.shared_state,
&mut self.socket,
&mut self.buffer,
&mut self.opt_resend_buffer,
response,
recv_ref.addr,
);
}
}
fn send_response(
config: &Config,
shared_state: &State,
socket: &mut UdpSocket,
buffer: &mut [u8],
opt_resend_buffer: &mut Option<Vec<(Response, CanonicalSocketAddr)>>,
response: Response,
response: CowResponse,
canonical_addr: CanonicalSocketAddr,
) {
let mut cursor = Cursor::new(buffer);
let mut buffer = Cursor::new(&mut buffer[..]);
if let Err(err) = response.write(&mut cursor) {
::log::error!("Converting response to bytes failed: {:#}", err);
if let Err(err) = response.write(&mut buffer) {
::log::error!("failed writing response to buffer: {:#}", err);
return;
}
let bytes_written = cursor.position() as usize;
let bytes_written = buffer.position() as usize;
let addr = if config.network.address.is_ipv4() {
canonical_addr
@ -372,7 +457,7 @@ impl SocketWorker {
canonical_addr.get_ipv6_mapped()
};
match socket.send_to(&cursor.get_ref()[..bytes_written], addr) {
match socket.send_to(&buffer.into_inner()[..bytes_written], addr) {
Ok(amt) if config.statistics.active() => {
let stats = if canonical_addr.is_ipv4() {
let stats = &shared_state.statistics_ipv4;
@ -393,18 +478,18 @@ impl SocketWorker {
};
match response {
Response::Connect(_) => {
CowResponse::Connect(_) => {
stats.responses_sent_connect.fetch_add(1, Ordering::Relaxed);
}
Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => {
CowResponse::AnnounceIpv4(_) | CowResponse::AnnounceIpv6(_) => {
stats
.responses_sent_announce
.fetch_add(1, Ordering::Relaxed);
}
Response::Scrape(_) => {
CowResponse::Scrape(_) => {
stats.responses_sent_scrape.fetch_add(1, Ordering::Relaxed);
}
Response::Error(_) => {
CowResponse::Error(_) => {
stats.responses_sent_error.fetch_add(1, Ordering::Relaxed);
}
}
@ -418,7 +503,7 @@ impl SocketWorker {
if resend_buffer.len() < config.network.resend_buffer_max_len {
::log::info!("Adding response to resend queue, since sending it to {} failed with: {:#}", addr, err);
resend_buffer.push((response, canonical_addr));
resend_buffer.push((response.into_owned(), canonical_addr));
} else {
::log::warn!("Response resend buffer full, dropping response");
}

View file

@ -1,23 +1,23 @@
mod mio;
mod storage;
#[cfg(feature = "io-uring")]
#[cfg(all(target_os = "linux", feature = "io-uring"))]
mod uring;
mod validator;
use anyhow::Context;
use aquatic_common::{
privileges::PrivilegeDropper, CanonicalSocketAddr, PanicSentinel, ServerStartInstant,
};
use crossbeam_channel::Receiver;
use aquatic_common::{privileges::PrivilegeDropper, PanicSentinel, ServerStartInstant};
use socket2::{Domain, Protocol, Socket, Type};
use crate::{
common::{ConnectedRequestSender, ConnectedResponse, State},
common::{ConnectedRequestSender, ConnectedResponseReceiver, State},
config::Config,
};
pub use self::validator::ConnectionValidator;
#[cfg(all(not(target_os = "linux"), feature = "io-uring"))]
compile_error!("io_uring feature is only supported on Linux");
/// Bytes of data transmitted when sending an IPv4 UDP packet, in addition to payload size
///
/// Consists of:
@ -43,10 +43,10 @@ pub fn run_socket_worker(
validator: ConnectionValidator,
server_start_instant: ServerStartInstant,
request_sender: ConnectedRequestSender,
response_receiver: Receiver<(ConnectedResponse, CanonicalSocketAddr)>,
response_receiver: ConnectedResponseReceiver,
priv_dropper: PrivilegeDropper,
) {
#[cfg(feature = "io-uring")]
#[cfg(all(target_os = "linux", feature = "io-uring"))]
match self::uring::supported_on_current_kernel() {
Ok(()) => {
self::uring::SocketWorker::run(

View file

@ -65,14 +65,12 @@ impl PendingScrapeResponseSlab {
pub fn add_and_get_finished(
&mut self,
response: PendingScrapeResponse,
response: &PendingScrapeResponse,
) -> Option<ScrapeResponse> {
let finished = if let Some(entry) = self.0.get_mut(response.slab_key) {
entry.num_pending -= 1;
entry
.torrent_stats
.extend(response.torrent_stats.into_iter());
entry.torrent_stats.extend(response.torrent_stats.iter());
entry.num_pending == 0
} else {
@ -156,8 +154,8 @@ mod tests {
}
let request = ScrapeRequest {
transaction_id: TransactionId(t),
connection_id: ConnectionId(c),
transaction_id: TransactionId::new(t),
connection_id: ConnectionId::new(c),
info_hashes,
};
@ -192,9 +190,9 @@ mod tests {
(
i,
TorrentScrapeStatistics {
seeders: NumberOfPeers((info_hash.0[0]) as i32),
leechers: NumberOfPeers(0),
completed: NumberOfDownloads(0),
seeders: NumberOfPeers::new((info_hash.0[0]) as i32),
leechers: NumberOfPeers::new(0),
completed: NumberOfDownloads::new(0),
},
)
})
@ -205,7 +203,7 @@ mod tests {
torrent_stats,
};
if let Some(response) = map.add_and_get_finished(response) {
if let Some(response) = map.add_and_get_finished(&response) {
responses.push(response);
}
}

View file

@ -2,6 +2,7 @@ mod buf_ring;
mod recv_helper;
mod send_buffers;
use std::borrow::Cow;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::net::UdpSocket;
@ -12,7 +13,6 @@ use std::sync::atomic::Ordering;
use anyhow::Context;
use aquatic_common::access_list::AccessListCache;
use aquatic_common::ServerStartInstant;
use crossbeam_channel::Receiver;
use io_uring::opcode::Timeout;
use io_uring::types::{Fixed, Timespec};
use io_uring::{IoUring, Probe};
@ -36,8 +36,10 @@ use super::{create_socket, EXTRA_PACKET_SIZE_IPV4, EXTRA_PACKET_SIZE_IPV6};
/// Size of each request buffer
///
/// Enough for scrape request with 20 info hashes
const REQUEST_BUF_LEN: usize = 256;
/// Needs to fit recvmsg metadata in addition to the payload.
///
/// The payload of a scrape request with 20 info hashes fits in 256 bytes.
const REQUEST_BUF_LEN: usize = 512;
/// Size of each response buffer
///
@ -76,7 +78,7 @@ pub struct SocketWorker {
config: Config,
shared_state: State,
request_sender: ConnectedRequestSender,
response_receiver: Receiver<(ConnectedResponse, CanonicalSocketAddr)>,
response_receiver: ConnectedResponseReceiver,
access_list_cache: AccessListCache,
validator: ConnectionValidator,
server_start_instant: ServerStartInstant,
@ -102,7 +104,7 @@ impl SocketWorker {
validator: ConnectionValidator,
server_start_instant: ServerStartInstant,
request_sender: ConnectedRequestSender,
response_receiver: Receiver<(ConnectedResponse, CanonicalSocketAddr)>,
response_receiver: ConnectedResponseReceiver,
priv_dropper: PrivilegeDropper,
) {
let ring_entries = config.network.ring_size.next_power_of_two();
@ -111,6 +113,7 @@ impl SocketWorker {
let socket = create_socket(&config, priv_dropper).expect("create socket");
let access_list_cache = create_access_list_cache(&shared_state.access_list);
let send_buffers = SendBuffers::new(&config, send_buffer_entries as usize);
let recv_helper = RecvHelper::new(&config);
@ -207,14 +210,15 @@ impl SocketWorker {
// Enqueue local responses
for _ in 0..sq_space {
if let Some((response, addr)) = self.local_responses.pop_front() {
match self.send_buffers.prepare_entry(&response, addr) {
match self.send_buffers.prepare_entry(response.into(), addr) {
Ok(entry) => {
unsafe { ring.submission().push(&entry).unwrap() };
num_send_added += 1;
}
Err(send_buffers::Error::NoBuffers) => {
self.local_responses.push_front((response, addr));
Err(send_buffers::Error::NoBuffers(response)) => {
self.local_responses
.push_front((response.into_owned(), addr));
break;
}
@ -229,24 +233,46 @@ impl SocketWorker {
// Enqueue swarm worker responses
for _ in 0..(sq_space - num_send_added) {
if let Some((response, addr)) = self.get_next_swarm_response() {
match self.send_buffers.prepare_entry(&response, addr) {
Ok(entry) => {
unsafe { ring.submission().push(&entry).unwrap() };
num_send_added += 1;
}
Err(send_buffers::Error::NoBuffers) => {
self.local_responses.push_back((response, addr));
break;
}
Err(send_buffers::Error::SerializationFailed(err)) => {
::log::error!("Failed serializing response: {:#}", err);
}
}
let recv_ref = if let Ok(recv_ref) = self.response_receiver.try_recv_ref() {
recv_ref
} else {
break;
};
let response = match recv_ref.kind {
ConnectedResponseKind::AnnounceIpv4 => {
CowResponse::AnnounceIpv4(Cow::Borrowed(&recv_ref.announce_ipv4))
}
ConnectedResponseKind::AnnounceIpv6 => {
CowResponse::AnnounceIpv6(Cow::Borrowed(&recv_ref.announce_ipv6))
}
ConnectedResponseKind::Scrape => {
if let Some(response) = self
.pending_scrape_responses
.add_and_get_finished(&recv_ref.scrape)
{
CowResponse::Scrape(Cow::Owned(response))
} else {
continue;
}
}
};
match self.send_buffers.prepare_entry(response, recv_ref.addr) {
Ok(entry) => {
unsafe { ring.submission().push(&entry).unwrap() };
num_send_added += 1;
}
Err(send_buffers::Error::NoBuffers(response)) => {
self.local_responses
.push_back((response.into_owned(), recv_ref.addr));
break;
}
Err(send_buffers::Error::SerializationFailed(err)) => {
::log::error!("Failed serializing response: {:#}", err);
}
}
}
@ -280,12 +306,6 @@ impl SocketWorker {
self.config.cleaning.max_pending_scrape_age,
);
::log::info!(
"pending responses: {} local, {} swarm",
self.local_responses.len(),
self.response_receiver.len()
);
self.resubmittable_sqe_buf
.push(self.pulse_timeout_sqe.clone());
}
@ -372,9 +392,7 @@ impl SocketWorker {
}
};
let buffer = buffer.as_slice();
let addr = match self.recv_helper.parse(buffer) {
let addr = match self.recv_helper.parse(buffer.as_slice()) {
Ok((request, addr)) => {
self.handle_request(request, addr);
@ -392,7 +410,7 @@ impl SocketWorker {
if self.validator.connection_id_valid(addr, connection_id) {
let response = ErrorResponse {
transaction_id,
message: err.right_or("Parse error").into(),
message: err.into(),
};
self.local_responses.push_back((response.into(), addr));
@ -413,6 +431,11 @@ impl SocketWorker {
Err(self::recv_helper::Error::RecvMsgParseError) => {
::log::error!("RecvMsgOut::parse failed");
return;
}
Err(self::recv_helper::Error::RecvMsgTruncated) => {
::log::warn!("RecvMsgOut::parse failed: sockaddr or payload truncated");
return;
}
};
@ -458,11 +481,13 @@ impl SocketWorker {
let worker_index =
SwarmWorkerIndex::from_info_hash(&self.config, request.info_hash);
self.request_sender.try_send_to(
if let Err(_) = self.request_sender.try_send_to(
worker_index,
ConnectedRequest::Announce(request),
src,
);
) {
::log::warn!("request sender full, dropping request");
}
} else {
let response = Response::Error(ErrorResponse {
transaction_id: request.transaction_id,
@ -485,39 +510,18 @@ impl SocketWorker {
);
for (swarm_worker_index, request) in split_requests {
self.request_sender.try_send_to(
if let Err(_) = self.request_sender.try_send_to(
swarm_worker_index,
ConnectedRequest::Scrape(request),
src,
);
) {
::log::warn!("request sender full, dropping request");
}
}
}
}
}
}
fn get_next_swarm_response(&mut self) -> Option<(Response, CanonicalSocketAddr)> {
loop {
match self.response_receiver.try_recv() {
Ok((ConnectedResponse::AnnounceIpv4(response), addr)) => {
return Some((Response::AnnounceIpv4(response), addr));
}
Ok((ConnectedResponse::AnnounceIpv6(response), addr)) => {
return Some((Response::AnnounceIpv6(response), addr));
}
Ok((ConnectedResponse::Scrape(response), addr)) => {
if let Some(response) =
self.pending_scrape_responses.add_and_get_finished(response)
{
return Some((Response::Scrape(response), addr));
}
}
Err(_) => {
return None;
}
}
}
}
}
pub fn supported_on_current_kernel() -> anyhow::Result<()> {

View file

@ -1,5 +1,4 @@
use std::{
cell::UnsafeCell,
net::{Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6},
ptr::null_mut,
};
@ -14,6 +13,7 @@ use super::{SOCKET_IDENTIFIER, USER_DATA_RECV};
pub enum Error {
RecvMsgParseError,
RecvMsgTruncated,
RequestParseError(RequestParseError, CanonicalSocketAddr),
InvalidSocketAddress,
}
@ -22,24 +22,24 @@ pub struct RecvHelper {
socket_is_ipv4: bool,
max_scrape_torrents: u8,
#[allow(dead_code)]
name_v4: Box<UnsafeCell<libc::sockaddr_in>>,
msghdr_v4: Box<UnsafeCell<libc::msghdr>>,
name_v4: *const libc::sockaddr_in,
msghdr_v4: *const libc::msghdr,
#[allow(dead_code)]
name_v6: Box<UnsafeCell<libc::sockaddr_in6>>,
msghdr_v6: Box<UnsafeCell<libc::msghdr>>,
name_v6: *const libc::sockaddr_in6,
msghdr_v6: *const libc::msghdr,
}
impl RecvHelper {
pub fn new(config: &Config) -> Self {
let name_v4 = Box::new(UnsafeCell::new(libc::sockaddr_in {
let name_v4 = Box::into_raw(Box::new(libc::sockaddr_in {
sin_family: 0,
sin_port: 0,
sin_addr: libc::in_addr { s_addr: 0 },
sin_zero: [0; 8],
}));
let msghdr_v4 = Box::new(UnsafeCell::new(libc::msghdr {
msg_name: name_v4.get() as *mut libc::c_void,
let msghdr_v4 = Box::into_raw(Box::new(libc::msghdr {
msg_name: name_v4 as *mut libc::c_void,
msg_namelen: core::mem::size_of::<libc::sockaddr_in>() as u32,
msg_iov: null_mut(),
msg_iovlen: 0,
@ -48,7 +48,7 @@ impl RecvHelper {
msg_flags: 0,
}));
let name_v6 = Box::new(UnsafeCell::new(libc::sockaddr_in6 {
let name_v6 = Box::into_raw(Box::new(libc::sockaddr_in6 {
sin6_family: 0,
sin6_port: 0,
sin6_flowinfo: 0,
@ -56,8 +56,8 @@ impl RecvHelper {
sin6_scope_id: 0,
}));
let msghdr_v6 = Box::new(UnsafeCell::new(libc::msghdr {
msg_name: name_v6.get() as *mut libc::c_void,
let msghdr_v6 = Box::into_raw(Box::new(libc::msghdr {
msg_name: name_v6 as *mut libc::c_void,
msg_namelen: core::mem::size_of::<libc::sockaddr_in6>() as u32,
msg_iov: null_mut(),
msg_iovlen: 0,
@ -77,10 +77,10 @@ impl RecvHelper {
}
pub fn create_entry(&self, buf_group: u16) -> io_uring::squeue::Entry {
let msghdr: *const libc::msghdr = if self.socket_is_ipv4 {
self.msghdr_v4.get()
let msghdr = if self.socket_is_ipv4 {
self.msghdr_v4
} else {
self.msghdr_v6.get()
self.msghdr_v6
};
RecvMsgMulti::new(SOCKET_IDENTIFIER, msghdr, buf_group)
@ -90,51 +90,51 @@ impl RecvHelper {
pub fn parse(&self, buffer: &[u8]) -> Result<(Request, CanonicalSocketAddr), Error> {
let (msg, addr) = if self.socket_is_ipv4 {
let msg = unsafe {
let msghdr = &*(self.msghdr_v4.get() as *const _);
// Safe as long as kernel only reads from the pointer and doesn't
// write to it. I think this is the case.
let msghdr = unsafe { self.msghdr_v4.read() };
RecvMsgOut::parse(buffer, msghdr).map_err(|_| Error::RecvMsgParseError)?
};
let msg = RecvMsgOut::parse(buffer, &msghdr).map_err(|_| Error::RecvMsgParseError)?;
let addr = unsafe {
let name_data = *(msg.name_data().as_ptr() as *const libc::sockaddr_in);
SocketAddr::V4(SocketAddrV4::new(
u32::from_be(name_data.sin_addr.s_addr).into(),
u16::from_be(name_data.sin_port),
))
};
if addr.port() == 0 {
return Err(Error::InvalidSocketAddress);
if msg.is_name_data_truncated() | msg.is_payload_truncated() {
return Err(Error::RecvMsgTruncated);
}
let name_data = unsafe { *(msg.name_data().as_ptr() as *const libc::sockaddr_in) };
let addr = SocketAddr::V4(SocketAddrV4::new(
u32::from_be(name_data.sin_addr.s_addr).into(),
u16::from_be(name_data.sin_port),
));
(msg, addr)
} else {
let msg = unsafe {
let msghdr = &*(self.msghdr_v6.get() as *const _);
// Safe as long as kernel only reads from the pointer and doesn't
// write to it. I think this is the case.
let msghdr = unsafe { self.msghdr_v6.read() };
RecvMsgOut::parse(buffer, msghdr).map_err(|_| Error::RecvMsgParseError)?
};
let msg = RecvMsgOut::parse(buffer, &msghdr).map_err(|_| Error::RecvMsgParseError)?;
let addr = unsafe {
let name_data = *(msg.name_data().as_ptr() as *const libc::sockaddr_in6);
SocketAddr::V6(SocketAddrV6::new(
Ipv6Addr::from(name_data.sin6_addr.s6_addr),
u16::from_be(name_data.sin6_port),
u32::from_be(name_data.sin6_flowinfo),
u32::from_be(name_data.sin6_scope_id),
))
};
if addr.port() == 0 {
return Err(Error::InvalidSocketAddress);
if msg.is_name_data_truncated() | msg.is_payload_truncated() {
return Err(Error::RecvMsgTruncated);
}
let name_data = unsafe { *(msg.name_data().as_ptr() as *const libc::sockaddr_in6) };
let addr = SocketAddr::V6(SocketAddrV6::new(
Ipv6Addr::from(name_data.sin6_addr.s6_addr),
u16::from_be(name_data.sin6_port),
u32::from_be(name_data.sin6_flowinfo),
u32::from_be(name_data.sin6_scope_id),
));
(msg, addr)
};
if addr.port() == 0 {
return Err(Error::InvalidSocketAddress);
}
let addr = CanonicalSocketAddr::new(addr);
let request = Request::from_bytes(msg.payload_data(), self.max_scrape_torrents)

View file

@ -1,186 +1,35 @@
use std::{cell::UnsafeCell, io::Cursor, net::SocketAddr, ops::IndexMut, ptr::null_mut};
use std::{
io::Cursor,
iter::repeat_with,
net::SocketAddr,
ptr::{addr_of_mut, null_mut},
};
use aquatic_common::CanonicalSocketAddr;
use aquatic_udp_protocol::Response;
use io_uring::opcode::SendMsg;
use crate::config::Config;
use crate::{common::CowResponse, config::Config};
use super::{RESPONSE_BUF_LEN, SOCKET_IDENTIFIER};
pub enum Error {
NoBuffers,
pub enum Error<'a> {
NoBuffers(CowResponse<'a>),
SerializationFailed(std::io::Error),
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ResponseType {
Connect,
Announce,
Scrape,
Error,
}
impl ResponseType {
fn from_response(response: &Response) -> Self {
match response {
Response::Connect(_) => Self::Connect,
Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => Self::Announce,
Response::Scrape(_) => Self::Scrape,
Response::Error(_) => Self::Error,
}
}
}
struct SendBuffer {
name_v4: UnsafeCell<libc::sockaddr_in>,
name_v6: UnsafeCell<libc::sockaddr_in6>,
bytes: UnsafeCell<[u8; RESPONSE_BUF_LEN]>,
iovec: UnsafeCell<libc::iovec>,
msghdr: UnsafeCell<libc::msghdr>,
free: bool,
/// Only used for statistics
receiver_is_ipv4: bool,
/// Only used for statistics
response_type: ResponseType,
}
impl SendBuffer {
fn new_with_null_pointers() -> Self {
Self {
name_v4: UnsafeCell::new(libc::sockaddr_in {
sin_family: libc::AF_INET as u16,
sin_port: 0,
sin_addr: libc::in_addr { s_addr: 0 },
sin_zero: [0; 8],
}),
name_v6: UnsafeCell::new(libc::sockaddr_in6 {
sin6_family: libc::AF_INET6 as u16,
sin6_port: 0,
sin6_flowinfo: 0,
sin6_addr: libc::in6_addr { s6_addr: [0; 16] },
sin6_scope_id: 0,
}),
bytes: UnsafeCell::new([0; RESPONSE_BUF_LEN]),
iovec: UnsafeCell::new(libc::iovec {
iov_base: null_mut(),
iov_len: 0,
}),
msghdr: UnsafeCell::new(libc::msghdr {
msg_name: null_mut(),
msg_namelen: 0,
msg_iov: null_mut(),
msg_iovlen: 1,
msg_control: null_mut(),
msg_controllen: 0,
msg_flags: 0,
}),
free: true,
receiver_is_ipv4: true,
response_type: ResponseType::Connect,
}
}
fn setup_pointers(&mut self, socket_is_ipv4: bool) {
unsafe {
let iovec = &mut *self.iovec.get();
iovec.iov_base = self.bytes.get() as *mut libc::c_void;
iovec.iov_len = (&*self.bytes.get()).len();
let msghdr = &mut *self.msghdr.get();
msghdr.msg_iov = self.iovec.get();
if socket_is_ipv4 {
msghdr.msg_name = self.name_v4.get() as *mut libc::c_void;
msghdr.msg_namelen = core::mem::size_of::<libc::sockaddr_in>() as u32;
} else {
msghdr.msg_name = self.name_v6.get() as *mut libc::c_void;
msghdr.msg_namelen = core::mem::size_of::<libc::sockaddr_in6>() as u32;
}
}
}
/// # Safety
///
/// - SendBuffer must be stored at a fixed location in memory
/// - SendBuffer.setup_pointers must have been called while stored at that
/// fixed location
/// - Contents of struct fields wrapped in UnsafeCell can NOT be accessed
/// simultaneously to this function call
unsafe fn prepare_entry(
&mut self,
response: &Response,
addr: CanonicalSocketAddr,
socket_is_ipv4: bool,
) -> Result<io_uring::squeue::Entry, Error> {
// Set receiver socket addr
if socket_is_ipv4 {
self.receiver_is_ipv4 = true;
let addr = if let Some(SocketAddr::V4(addr)) = addr.get_ipv4() {
addr
} else {
panic!("ipv6 address in ipv4 mode");
};
let name = &mut *self.name_v4.get();
name.sin_port = addr.port().to_be();
name.sin_addr.s_addr = u32::from(*addr.ip()).to_be();
} else {
// Set receiver protocol type before calling addr.get_ipv6_mapped()
self.receiver_is_ipv4 = addr.is_ipv4();
let addr = if let SocketAddr::V6(addr) = addr.get_ipv6_mapped() {
addr
} else {
panic!("ipv4 address when ipv6 or ipv6-mapped address expected");
};
let name = &mut *self.name_v6.get();
name.sin6_port = addr.port().to_be();
name.sin6_addr.s6_addr = addr.ip().octets();
}
let bytes = (&mut *self.bytes.get()).as_mut_slice();
let mut cursor = Cursor::new(bytes);
match response.write(&mut cursor) {
Ok(()) => {
(&mut *self.iovec.get()).iov_len = cursor.position() as usize;
self.response_type = ResponseType::from_response(response);
self.free = false;
Ok(SendMsg::new(SOCKET_IDENTIFIER, self.msghdr.get()).build())
}
Err(err) => Err(Error::SerializationFailed(err)),
}
}
}
pub struct SendBuffers {
likely_next_free_index: usize,
socket_is_ipv4: bool,
buffers: Box<[SendBuffer]>,
buffers: Vec<(SendBufferMetadata, *mut SendBuffer)>,
}
impl SendBuffers {
pub fn new(config: &Config, capacity: usize) -> Self {
let socket_is_ipv4 = config.network.address.is_ipv4();
let mut buffers = ::std::iter::repeat_with(|| SendBuffer::new_with_null_pointers())
let buffers = repeat_with(|| (Default::default(), SendBuffer::new(socket_is_ipv4)))
.take(capacity)
.collect::<Vec<_>>()
.into_boxed_slice();
for buffer in buffers.iter_mut() {
buffer.setup_pointers(socket_is_ipv4);
}
.collect::<Vec<_>>();
Self {
likely_next_free_index: 0,
@ -190,9 +39,9 @@ impl SendBuffers {
}
pub fn response_type_and_ipv4(&self, index: usize) -> (ResponseType, bool) {
let buffer = self.buffers.get(index).unwrap();
let meta = &self.buffers.get(index).unwrap().0;
(buffer.response_type, buffer.receiver_is_ipv4)
(meta.response_type, meta.receiver_is_ipv4)
}
/// # Safety
@ -200,7 +49,7 @@ impl SendBuffers {
/// Only safe to call once buffer is no longer referenced by in-flight
/// io_uring queue entries
pub unsafe fn mark_buffer_as_free(&mut self, index: usize) {
self.buffers[index].free = true;
self.buffers[index].0.free = true;
}
/// Call after going through completion queue
@ -208,44 +57,192 @@ impl SendBuffers {
self.likely_next_free_index = 0;
}
pub fn prepare_entry(
pub fn prepare_entry<'a>(
&mut self,
response: &Response,
response: CowResponse<'a>,
addr: CanonicalSocketAddr,
) -> Result<io_uring::squeue::Entry, Error> {
let index = self.next_free_index()?;
) -> Result<io_uring::squeue::Entry, Error<'a>> {
let index = if let Some(index) = self.next_free_index() {
index
} else {
return Err(Error::NoBuffers(response));
};
let buffer = self.buffers.index_mut(index);
let (buffer_metadata, buffer) = self.buffers.get_mut(index).unwrap();
// Safety: OK because buffers are stored in fixed memory location,
// buffer pointers were set up in SendBuffers::new() and pointers to
// SendBuffer UnsafeCell contents are not accessed elsewhere
unsafe {
match buffer.prepare_entry(response, addr, self.socket_is_ipv4) {
Ok(entry) => {
self.likely_next_free_index = index + 1;
// Safe as long as `mark_buffer_as_free` was used correctly
let buffer = unsafe { &mut *(*buffer) };
Ok(entry.user_data(index as u64))
}
Err(err) => Err(err),
match buffer.prepare_entry(response, addr, self.socket_is_ipv4, buffer_metadata) {
Ok(entry) => {
buffer_metadata.free = false;
self.likely_next_free_index = index + 1;
Ok(entry.user_data(index as u64))
}
Err(err) => Err(err),
}
}
fn next_free_index(&self) -> Result<usize, Error> {
fn next_free_index(&self) -> Option<usize> {
if self.likely_next_free_index >= self.buffers.len() {
return Err(Error::NoBuffers);
return None;
}
for (i, buffer) in self.buffers[self.likely_next_free_index..]
for (i, (meta, _)) in self.buffers[self.likely_next_free_index..]
.iter()
.enumerate()
{
if buffer.free {
return Ok(self.likely_next_free_index + i);
if meta.free {
return Some(self.likely_next_free_index + i);
}
}
Err(Error::NoBuffers)
None
}
}
/// Make sure not to hold any reference to this struct while kernel can
/// write to its contents
struct SendBuffer {
name_v4: libc::sockaddr_in,
name_v6: libc::sockaddr_in6,
bytes: [u8; RESPONSE_BUF_LEN],
iovec: libc::iovec,
msghdr: libc::msghdr,
}
impl SendBuffer {
fn new(socket_is_ipv4: bool) -> *mut Self {
let mut instance = Box::new(Self {
name_v4: libc::sockaddr_in {
sin_family: libc::AF_INET as u16,
sin_port: 0,
sin_addr: libc::in_addr { s_addr: 0 },
sin_zero: [0; 8],
},
name_v6: libc::sockaddr_in6 {
sin6_family: libc::AF_INET6 as u16,
sin6_port: 0,
sin6_flowinfo: 0,
sin6_addr: libc::in6_addr { s6_addr: [0; 16] },
sin6_scope_id: 0,
},
bytes: [0; RESPONSE_BUF_LEN],
iovec: libc::iovec {
iov_base: null_mut(),
iov_len: 0,
},
msghdr: libc::msghdr {
msg_name: null_mut(),
msg_namelen: 0,
msg_iov: null_mut(),
msg_iovlen: 1,
msg_control: null_mut(),
msg_controllen: 0,
msg_flags: 0,
},
});
instance.iovec.iov_base = addr_of_mut!(instance.bytes) as *mut libc::c_void;
instance.iovec.iov_len = instance.bytes.len();
instance.msghdr.msg_iov = addr_of_mut!(instance.iovec);
if socket_is_ipv4 {
instance.msghdr.msg_name = addr_of_mut!(instance.name_v4) as *mut libc::c_void;
instance.msghdr.msg_namelen = core::mem::size_of::<libc::sockaddr_in>() as u32;
} else {
instance.msghdr.msg_name = addr_of_mut!(instance.name_v6) as *mut libc::c_void;
instance.msghdr.msg_namelen = core::mem::size_of::<libc::sockaddr_in6>() as u32;
}
Box::into_raw(instance)
}
fn prepare_entry(
&mut self,
response: CowResponse,
addr: CanonicalSocketAddr,
socket_is_ipv4: bool,
metadata: &mut SendBufferMetadata,
) -> Result<io_uring::squeue::Entry, Error> {
if socket_is_ipv4 {
metadata.receiver_is_ipv4 = true;
let addr = if let Some(SocketAddr::V4(addr)) = addr.get_ipv4() {
addr
} else {
panic!("ipv6 address in ipv4 mode");
};
self.name_v4.sin_port = addr.port().to_be();
self.name_v4.sin_addr.s_addr = u32::from(*addr.ip()).to_be();
} else {
// Set receiver protocol type before calling addr.get_ipv6_mapped()
metadata.receiver_is_ipv4 = addr.is_ipv4();
let addr = if let SocketAddr::V6(addr) = addr.get_ipv6_mapped() {
addr
} else {
panic!("ipv4 address when ipv6 or ipv6-mapped address expected");
};
self.name_v6.sin6_port = addr.port().to_be();
self.name_v6.sin6_addr.s6_addr = addr.ip().octets();
}
let mut cursor = Cursor::new(&mut self.bytes[..]);
match response.write(&mut cursor) {
Ok(()) => {
self.iovec.iov_len = cursor.position() as usize;
metadata.response_type = ResponseType::from_response(&response);
Ok(SendMsg::new(SOCKET_IDENTIFIER, addr_of_mut!(self.msghdr)).build())
}
Err(err) => Err(Error::SerializationFailed(err)),
}
}
}
#[derive(Debug)]
struct SendBufferMetadata {
free: bool,
/// Only used for statistics
receiver_is_ipv4: bool,
/// Only used for statistics
response_type: ResponseType,
}
impl Default for SendBufferMetadata {
fn default() -> Self {
Self {
free: true,
receiver_is_ipv4: true,
response_type: Default::default(),
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum ResponseType {
#[default]
Connect,
Announce,
Scrape,
Error,
}
impl ResponseType {
fn from_response(response: &CowResponse) -> Self {
match response {
CowResponse::Connect(_) => Self::Connect,
CowResponse::AnnounceIpv4(_) | CowResponse::AnnounceIpv6(_) => Self::Announce,
CowResponse::Scrape(_) => Self::Scrape,
CowResponse::Error(_) => Self::Error,
}
}
}

View file

@ -59,7 +59,7 @@ impl ConnectionValidator {
(&mut connection_id_bytes[..4]).copy_from_slice(&valid_until);
(&mut connection_id_bytes[4..]).copy_from_slice(&hash);
ConnectionId(i64::from_ne_bytes(connection_id_bytes))
ConnectionId::new(i64::from_ne_bytes(connection_id_bytes))
}
pub fn connection_id_valid(
@ -67,7 +67,7 @@ impl ConnectionValidator {
source_addr: CanonicalSocketAddr,
connection_id: ConnectionId,
) -> bool {
let bytes = connection_id.0.to_ne_bytes();
let bytes = connection_id.0.get().to_ne_bytes();
let (valid_until, hash) = bytes.split_at(4);
let valid_until: [u8; 4] = valid_until.try_into().unwrap();

View file

@ -12,12 +12,10 @@ use rand::{rngs::SmallRng, SeedableRng};
use aquatic_common::{CanonicalSocketAddr, PanicSentinel, ValidUntil};
use aquatic_udp_protocol::*;
use crate::common::*;
use crate::config::Config;
use storage::{TorrentMap, TorrentMaps};
use storage::TorrentMaps;
pub fn run_swarm_worker(
_sentinel: PanicSentinel,
@ -45,42 +43,65 @@ pub fn run_swarm_worker(
loop {
if let Ok((sender_index, request, src)) = request_receiver.recv_timeout(timeout) {
let response = match (request, src.get().ip()) {
(ConnectedRequest::Announce(request), IpAddr::V4(ip)) => {
let response = handle_announce_request(
&config,
&mut rng,
&statistics_sender,
&mut torrents.ipv4,
request,
ip,
peer_valid_until,
);
// It is OK to block here as long as we don't do blocking sends
// in socket workers, which could cause a deadlock
match response_sender.send_ref_to(sender_index) {
Ok(mut send_ref) => {
send_ref.addr = src;
ConnectedResponse::AnnounceIpv4(response)
}
(ConnectedRequest::Announce(request), IpAddr::V6(ip)) => {
let response = handle_announce_request(
&config,
&mut rng,
&statistics_sender,
&mut torrents.ipv6,
request,
ip,
peer_valid_until,
);
match (request, src.get().ip()) {
(ConnectedRequest::Announce(request), IpAddr::V4(ip)) => {
send_ref.kind = ConnectedResponseKind::AnnounceIpv4;
ConnectedResponse::AnnounceIpv6(response)
}
(ConnectedRequest::Scrape(request), IpAddr::V4(_)) => {
ConnectedResponse::Scrape(handle_scrape_request(&mut torrents.ipv4, request))
}
(ConnectedRequest::Scrape(request), IpAddr::V6(_)) => {
ConnectedResponse::Scrape(handle_scrape_request(&mut torrents.ipv6, request))
}
};
torrents
.ipv4
.0
.entry(request.info_hash)
.or_default()
.announce(
&config,
&statistics_sender,
&mut rng,
&request,
ip.into(),
peer_valid_until,
&mut send_ref.announce_ipv4,
);
}
(ConnectedRequest::Announce(request), IpAddr::V6(ip)) => {
send_ref.kind = ConnectedResponseKind::AnnounceIpv6;
response_sender.try_send_to(sender_index, response, src);
torrents
.ipv6
.0
.entry(request.info_hash)
.or_default()
.announce(
&config,
&statistics_sender,
&mut rng,
&request,
ip.into(),
peer_valid_until,
&mut send_ref.announce_ipv6,
);
}
(ConnectedRequest::Scrape(request), IpAddr::V4(_)) => {
send_ref.kind = ConnectedResponseKind::Scrape;
torrents.ipv4.scrape(request, &mut send_ref.scrape);
}
(ConnectedRequest::Scrape(request), IpAddr::V6(_)) => {
send_ref.kind = ConnectedResponseKind::Scrape;
torrents.ipv6.scrape(request, &mut send_ref.scrape);
}
};
}
Err(_) => {
panic!("swarm response channel closed");
}
}
}
// Run periodic tasks
@ -116,85 +137,3 @@ pub fn run_swarm_worker(
iter_counter = iter_counter.wrapping_add(1);
}
}
fn handle_announce_request<I: Ip>(
config: &Config,
rng: &mut SmallRng,
statistics_sender: &Sender<StatisticsMessage>,
torrents: &mut TorrentMap<I>,
request: AnnounceRequest,
peer_ip: I,
peer_valid_until: ValidUntil,
) -> AnnounceResponse<I> {
let max_num_peers_to_take: usize = if request.peers_wanted.0 <= 0 {
config.protocol.max_response_peers
} else {
::std::cmp::min(
config.protocol.max_response_peers,
request.peers_wanted.0.try_into().unwrap(),
)
};
let torrent_data = torrents.0.entry(request.info_hash).or_default();
let peer_status = PeerStatus::from_event_and_bytes_left(request.event, request.bytes_left);
torrent_data.update_peer(
config,
statistics_sender,
request.peer_id,
peer_ip,
request.port,
peer_status,
peer_valid_until,
);
let response_peers = if let PeerStatus::Stopped = peer_status {
Vec::new()
} else {
torrent_data.extract_response_peers(rng, request.peer_id, max_num_peers_to_take)
};
AnnounceResponse {
transaction_id: request.transaction_id,
announce_interval: AnnounceInterval(config.protocol.peer_announce_interval),
leechers: NumberOfPeers(torrent_data.num_leechers().try_into().unwrap_or(i32::MAX)),
seeders: NumberOfPeers(torrent_data.num_seeders().try_into().unwrap_or(i32::MAX)),
peers: response_peers,
}
}
fn handle_scrape_request<I: Ip>(
torrents: &mut TorrentMap<I>,
request: PendingScrapeRequest,
) -> PendingScrapeResponse {
const EMPTY_STATS: TorrentScrapeStatistics = create_torrent_scrape_statistics(0, 0);
let torrent_stats = request
.info_hashes
.into_iter()
.map(|(i, info_hash)| {
let stats = torrents
.0
.get(&info_hash)
.map(|torrent_data| torrent_data.scrape_statistics())
.unwrap_or(EMPTY_STATS);
(i, stats)
})
.collect();
PendingScrapeResponse {
slab_key: request.slab_key,
torrent_stats,
}
}
#[inline(always)]
const fn create_torrent_scrape_statistics(seeders: i32, leechers: i32) -> TorrentScrapeStatistics {
TorrentScrapeStatistics {
seeders: NumberOfPeers(seeders),
completed: NumberOfDownloads(0), // No implementation planned
leechers: NumberOfPeers(leechers),
}
}

View file

@ -1,5 +1,3 @@
use std::net::Ipv4Addr;
use std::net::Ipv6Addr;
use std::sync::atomic::Ordering;
use std::sync::Arc;
@ -8,180 +6,68 @@ use aquatic_common::SecondsSinceServerStart;
use aquatic_common::ServerStartInstant;
use aquatic_common::{
access_list::{create_access_list_cache, AccessListArcSwap, AccessListCache, AccessListMode},
extract_response_peers, ValidUntil,
ValidUntil,
};
use aquatic_udp_protocol::*;
use crossbeam_channel::Sender;
use hdrhistogram::Histogram;
use rand::prelude::SmallRng;
use rand::Rng;
use crate::common::*;
use crate::config::Config;
use super::create_torrent_scrape_statistics;
#[derive(Clone, Debug)]
struct Peer<I: Ip> {
ip_address: I,
port: Port,
is_seeder: bool,
valid_until: ValidUntil,
pub struct TorrentMaps {
pub ipv4: TorrentMap<Ipv4AddrBytes>,
pub ipv6: TorrentMap<Ipv6AddrBytes>,
}
impl<I: Ip> Peer<I> {
fn to_response_peer(_: &PeerId, peer: &Self) -> ResponsePeer<I> {
ResponsePeer {
ip_address: peer.ip_address,
port: peer.port,
}
}
}
type PeerMap<I> = IndexMap<PeerId, Peer<I>>;
pub struct TorrentData<I: Ip> {
peers: PeerMap<I>,
num_seeders: usize,
}
impl<I: Ip> TorrentData<I> {
pub fn update_peer(
&mut self,
config: &Config,
statistics_sender: &Sender<StatisticsMessage>,
peer_id: PeerId,
ip_address: I,
port: Port,
status: PeerStatus,
valid_until: ValidUntil,
) {
let opt_removed_peer = match status {
PeerStatus::Leeching => {
let peer = Peer {
ip_address,
port,
is_seeder: false,
valid_until,
};
self.peers.insert(peer_id, peer)
}
PeerStatus::Seeding => {
let peer = Peer {
ip_address,
port,
is_seeder: true,
valid_until,
};
self.num_seeders += 1;
self.peers.insert(peer_id, peer)
}
PeerStatus::Stopped => self.peers.remove(&peer_id),
};
if config.statistics.peer_clients {
match (status, opt_removed_peer.is_some()) {
// We added a new peer
(PeerStatus::Leeching | PeerStatus::Seeding, false) => {
if let Err(_) =
statistics_sender.try_send(StatisticsMessage::PeerAdded(peer_id))
{
// Should never happen in practice
::log::error!("Couldn't send StatisticsMessage::PeerAdded");
}
}
// We removed an existing peer
(PeerStatus::Stopped, true) => {
if let Err(_) =
statistics_sender.try_send(StatisticsMessage::PeerRemoved(peer_id))
{
// Should never happen in practice
::log::error!("Couldn't send StatisticsMessage::PeerRemoved");
}
}
_ => (),
}
}
if let Some(Peer {
is_seeder: true, ..
}) = opt_removed_peer
{
self.num_seeders -= 1;
}
}
pub fn extract_response_peers(
&self,
rng: &mut SmallRng,
peer_id: PeerId,
max_num_peers_to_take: usize,
) -> Vec<ResponsePeer<I>> {
extract_response_peers(
rng,
&self.peers,
max_num_peers_to_take,
peer_id,
Peer::to_response_peer,
)
}
pub fn num_leechers(&self) -> usize {
self.peers.len() - self.num_seeders
}
pub fn num_seeders(&self) -> usize {
self.num_seeders
}
pub fn scrape_statistics(&self) -> TorrentScrapeStatistics {
create_torrent_scrape_statistics(
self.num_seeders.try_into().unwrap_or(i32::MAX),
self.num_leechers().try_into().unwrap_or(i32::MAX),
)
}
/// Remove inactive peers and reclaim space
fn clean(
&mut self,
config: &Config,
statistics_sender: &Sender<StatisticsMessage>,
now: SecondsSinceServerStart,
) {
self.peers.retain(|peer_id, peer| {
let keep = peer.valid_until.valid(now);
if !keep {
if peer.is_seeder {
self.num_seeders -= 1;
}
if config.statistics.peer_clients {
if let Err(_) =
statistics_sender.try_send(StatisticsMessage::PeerRemoved(*peer_id))
{
// Should never happen in practice
::log::error!("Couldn't send StatisticsMessage::PeerRemoved");
}
}
}
keep
});
if !self.peers.is_empty() {
self.peers.shrink_to_fit();
}
}
}
impl<I: Ip> Default for TorrentData<I> {
impl Default for TorrentMaps {
fn default() -> Self {
Self {
peers: Default::default(),
num_seeders: 0,
ipv4: TorrentMap(Default::default()),
ipv6: TorrentMap(Default::default()),
}
}
}
impl TorrentMaps {
/// Remove forbidden or inactive torrents, reclaim space and update statistics
pub fn clean_and_update_statistics(
&mut self,
config: &Config,
state: &State,
statistics_sender: &Sender<StatisticsMessage>,
access_list: &Arc<AccessListArcSwap>,
server_start_instant: ServerStartInstant,
worker_index: SwarmWorkerIndex,
) {
let mut cache = create_access_list_cache(access_list);
let mode = config.access_list.mode;
let now = server_start_instant.seconds_elapsed();
let ipv4 =
self.ipv4
.clean_and_get_statistics(config, statistics_sender, &mut cache, mode, now);
let ipv6 =
self.ipv6
.clean_and_get_statistics(config, statistics_sender, &mut cache, mode, now);
if config.statistics.active() {
state.statistics_ipv4.peers[worker_index.0].store(ipv4.0, Ordering::Release);
state.statistics_ipv6.peers[worker_index.0].store(ipv6.0, Ordering::Release);
if let Some(message) = ipv4.1.map(StatisticsMessage::Ipv4PeerHistogram) {
if let Err(err) = statistics_sender.try_send(message) {
::log::error!("couldn't send statistics message: {:#}", err);
}
}
if let Some(message) = ipv6.1.map(StatisticsMessage::Ipv6PeerHistogram) {
if let Err(err) = statistics_sender.try_send(message) {
::log::error!("couldn't send statistics message: {:#}", err);
}
}
}
}
}
@ -190,6 +76,21 @@ impl<I: Ip> Default for TorrentData<I> {
pub struct TorrentMap<I: Ip>(pub IndexMap<InfoHash, TorrentData<I>>);
impl<I: Ip> TorrentMap<I> {
pub fn scrape(&mut self, request: PendingScrapeRequest, response: &mut PendingScrapeResponse) {
response.slab_key = request.slab_key;
let torrent_stats = request.info_hashes.into_iter().map(|(i, info_hash)| {
let stats = self
.0
.get(&info_hash)
.map(|torrent_data| torrent_data.scrape_statistics())
.unwrap_or_else(|| create_torrent_scrape_statistics(0, 0));
(i, stats)
});
response.torrent_stats.extend(torrent_stats);
}
/// Remove forbidden or inactive torrents, reclaim space and return number of remaining peers
fn clean_and_get_statistics(
&mut self,
@ -255,148 +156,235 @@ impl<I: Ip> TorrentMap<I> {
}
}
pub struct TorrentMaps {
pub ipv4: TorrentMap<Ipv4Addr>,
pub ipv6: TorrentMap<Ipv6Addr>,
pub struct TorrentData<I: Ip> {
peers: IndexMap<PeerId, Peer<I>>,
num_seeders: usize,
}
impl Default for TorrentMaps {
fn default() -> Self {
Self {
ipv4: TorrentMap(Default::default()),
ipv6: TorrentMap(Default::default()),
}
}
}
impl TorrentMaps {
/// Remove forbidden or inactive torrents, reclaim space and update statistics
pub fn clean_and_update_statistics(
impl<I: Ip> TorrentData<I> {
pub fn announce(
&mut self,
config: &Config,
state: &State,
statistics_sender: &Sender<StatisticsMessage>,
access_list: &Arc<AccessListArcSwap>,
server_start_instant: ServerStartInstant,
worker_index: SwarmWorkerIndex,
rng: &mut SmallRng,
request: &AnnounceRequest,
ip_address: I,
valid_until: ValidUntil,
response: &mut AnnounceResponse<I>,
) {
let mut cache = create_access_list_cache(access_list);
let mode = config.access_list.mode;
let now = server_start_instant.seconds_elapsed();
let max_num_peers_to_take: usize = if request.peers_wanted.0.get() <= 0 {
config.protocol.max_response_peers
} else {
::std::cmp::min(
config.protocol.max_response_peers,
request.peers_wanted.0.get().try_into().unwrap(),
)
};
let ipv4 =
self.ipv4
.clean_and_get_statistics(config, statistics_sender, &mut cache, mode, now);
let ipv6 =
self.ipv6
.clean_and_get_statistics(config, statistics_sender, &mut cache, mode, now);
let status =
PeerStatus::from_event_and_bytes_left(request.event.into(), request.bytes_left);
if config.statistics.active() {
state.statistics_ipv4.peers[worker_index.0].store(ipv4.0, Ordering::Release);
state.statistics_ipv6.peers[worker_index.0].store(ipv6.0, Ordering::Release);
let opt_removed_peer = self.peers.remove(&request.peer_id);
if let Some(message) = ipv4.1.map(StatisticsMessage::Ipv4PeerHistogram) {
if let Err(err) = statistics_sender.try_send(message) {
::log::error!("couldn't send statistics message: {:#}", err);
if let Some(Peer {
is_seeder: true, ..
}) = opt_removed_peer
{
self.num_seeders -= 1;
}
// Create the response before inserting the peer. This means that we
// don't have to filter it out from the response peers, and that the
// reported number of seeders/leechers will not include it
response.fixed = AnnounceResponseFixedData {
transaction_id: request.transaction_id,
announce_interval: AnnounceInterval::new(config.protocol.peer_announce_interval),
leechers: NumberOfPeers::new(self.num_leechers().try_into().unwrap_or(i32::MAX)),
seeders: NumberOfPeers::new(self.num_seeders().try_into().unwrap_or(i32::MAX)),
};
extract_response_peers(
rng,
&self.peers,
max_num_peers_to_take,
Peer::to_response_peer,
&mut response.peers,
);
match status {
PeerStatus::Leeching => {
let peer = Peer {
ip_address,
port: request.port,
is_seeder: false,
valid_until,
};
self.peers.insert(request.peer_id, peer);
if config.statistics.peer_clients && opt_removed_peer.is_none() {
statistics_sender
.try_send(StatisticsMessage::PeerAdded(request.peer_id))
.expect("statistics channel should be unbounded");
}
}
if let Some(message) = ipv6.1.map(StatisticsMessage::Ipv6PeerHistogram) {
if let Err(err) = statistics_sender.try_send(message) {
::log::error!("couldn't send statistics message: {:#}", err);
PeerStatus::Seeding => {
let peer = Peer {
ip_address,
port: request.port,
is_seeder: true,
valid_until,
};
self.peers.insert(request.peer_id, peer);
self.num_seeders += 1;
if config.statistics.peer_clients && opt_removed_peer.is_none() {
statistics_sender
.try_send(StatisticsMessage::PeerAdded(request.peer_id))
.expect("statistics channel should be unbounded");
}
}
PeerStatus::Stopped => {
if config.statistics.peer_clients && opt_removed_peer.is_some() {
statistics_sender
.try_send(StatisticsMessage::PeerRemoved(request.peer_id))
.expect("statistics channel should be unbounded");
}
}
};
}
pub fn num_leechers(&self) -> usize {
self.peers.len() - self.num_seeders
}
pub fn num_seeders(&self) -> usize {
self.num_seeders
}
pub fn scrape_statistics(&self) -> TorrentScrapeStatistics {
create_torrent_scrape_statistics(
self.num_seeders.try_into().unwrap_or(i32::MAX),
self.num_leechers().try_into().unwrap_or(i32::MAX),
)
}
/// Remove inactive peers and reclaim space
fn clean(
&mut self,
config: &Config,
statistics_sender: &Sender<StatisticsMessage>,
now: SecondsSinceServerStart,
) {
self.peers.retain(|peer_id, peer| {
let keep = peer.valid_until.valid(now);
if !keep {
if peer.is_seeder {
self.num_seeders -= 1;
}
if config.statistics.peer_clients {
if let Err(_) =
statistics_sender.try_send(StatisticsMessage::PeerRemoved(*peer_id))
{
// Should never happen in practice
::log::error!("Couldn't send StatisticsMessage::PeerRemoved");
}
}
}
keep
});
if !self.peers.is_empty() {
self.peers.shrink_to_fit();
}
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::net::Ipv4Addr;
use quickcheck::{quickcheck, TestResult};
use rand::thread_rng;
use super::*;
fn gen_peer_id(i: u32) -> PeerId {
let mut peer_id = PeerId([0; 20]);
peer_id.0[0..4].copy_from_slice(&i.to_ne_bytes());
peer_id
}
fn gen_peer(i: u32) -> Peer<Ipv4Addr> {
Peer {
ip_address: Ipv4Addr::from(i.to_be_bytes()),
port: Port(1),
is_seeder: false,
valid_until: ValidUntil::new(ServerStartInstant::new(), 0),
impl<I: Ip> Default for TorrentData<I> {
fn default() -> Self {
Self {
peers: Default::default(),
num_seeders: 0,
}
}
#[test]
fn test_extract_response_peers() {
fn prop(data: (u16, u16)) -> TestResult {
let gen_num_peers = data.0 as u32;
let req_num_peers = data.1 as usize;
let mut peer_map: PeerMap<Ipv4Addr> = Default::default();
let mut opt_sender_key = None;
let mut opt_sender_peer = None;
for i in 0..gen_num_peers {
let key = gen_peer_id(i);
let peer = gen_peer((i << 16) + i);
if i == 0 {
opt_sender_key = Some(key);
opt_sender_peer = Some(Peer::to_response_peer(&key, &peer));
}
peer_map.insert(key, peer);
}
let mut rng = thread_rng();
let peers = extract_response_peers(
&mut rng,
&peer_map,
req_num_peers,
opt_sender_key.unwrap_or_else(|| gen_peer_id(1)),
Peer::to_response_peer,
);
// Check that number of returned peers is correct
let mut success = peers.len() <= req_num_peers;
if req_num_peers >= gen_num_peers as usize {
success &= peers.len() == gen_num_peers as usize
|| peers.len() + 1 == gen_num_peers as usize;
}
// Check that returned peers are unique (no overlap) and that sender
// isn't returned
let mut ip_addresses = HashSet::with_capacity(peers.len());
for peer in peers {
if peer == opt_sender_peer.clone().unwrap()
|| ip_addresses.contains(&peer.ip_address)
{
success = false;
break;
}
ip_addresses.insert(peer.ip_address);
}
TestResult::from_bool(success)
}
quickcheck(prop as fn((u16, u16)) -> TestResult);
}
}
#[derive(Clone, Debug)]
struct Peer<I: Ip> {
ip_address: I,
port: Port,
is_seeder: bool,
valid_until: ValidUntil,
}
impl<I: Ip> Peer<I> {
fn to_response_peer(_: &PeerId, peer: &Self) -> ResponsePeer<I> {
ResponsePeer {
ip_address: peer.ip_address,
port: peer.port,
}
}
}
/// Extract response peers
///
/// If there are more peers in map than `max_num_peers_to_take`, do a random
/// selection of peers from first and second halves of map in order to avoid
/// returning too homogeneous peers.
///
/// Does NOT filter out announcing peer.
#[inline]
pub fn extract_response_peers<K, V, R, F>(
rng: &mut impl Rng,
peer_map: &IndexMap<K, V>,
max_num_peers_to_take: usize,
peer_conversion_function: F,
peers: &mut Vec<R>,
) where
K: Eq + ::std::hash::Hash,
F: Fn(&K, &V) -> R,
{
if peer_map.len() <= max_num_peers_to_take {
peers.extend(peer_map.iter().map(|(k, v)| peer_conversion_function(k, v)));
} else {
let middle_index = peer_map.len() / 2;
let num_to_take_per_half = max_num_peers_to_take / 2;
let offset_half_one = {
let from = 0;
let to = usize::max(1, middle_index - num_to_take_per_half);
rng.gen_range(from..to)
};
let offset_half_two = {
let from = middle_index;
let to = usize::max(middle_index + 1, peer_map.len() - num_to_take_per_half);
rng.gen_range(from..to)
};
let end_half_one = offset_half_one + num_to_take_per_half;
let end_half_two = offset_half_two + num_to_take_per_half;
if let Some(slice) = peer_map.get_range(offset_half_one..end_half_one) {
peers.extend(slice.iter().map(|(k, v)| peer_conversion_function(k, v)));
}
if let Some(slice) = peer_map.get_range(offset_half_two..end_half_two) {
peers.extend(slice.iter().map(|(k, v)| peer_conversion_function(k, v)));
}
}
}
#[inline(always)]
fn create_torrent_scrape_statistics(seeders: i32, leechers: i32) -> TorrentScrapeStatistics {
TorrentScrapeStatistics {
seeders: NumberOfPeers::new(seeders),
completed: NumberOfDownloads::new(0), // No implementation planned
leechers: NumberOfPeers::new(leechers),
}
}

View file

@ -10,8 +10,8 @@ use anyhow::Context;
use aquatic_udp::{common::BUFFER_SIZE, config::Config};
use aquatic_udp_protocol::{
common::PeerId, AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, InfoHash,
NumberOfBytes, NumberOfPeers, PeerKey, Port, Request, Response, ScrapeRequest, ScrapeResponse,
TransactionId,
Ipv4AddrBytes, NumberOfBytes, NumberOfPeers, PeerKey, Port, Request, Response, ScrapeRequest,
ScrapeResponse, TransactionId,
};
// FIXME: should ideally try different ports and use sync primitives to find
@ -26,7 +26,7 @@ pub fn run_tracker(config: Config) {
pub fn connect(socket: &UdpSocket, tracker_addr: SocketAddr) -> anyhow::Result<ConnectionId> {
let request = Request::Connect(ConnectRequest {
transaction_id: TransactionId(0),
transaction_id: TransactionId::new(0),
});
let response = request_and_response(&socket, tracker_addr, request)?;
@ -55,17 +55,18 @@ pub fn announce(
let request = Request::Announce(AnnounceRequest {
connection_id,
transaction_id: TransactionId(0),
action_placeholder: Default::default(),
transaction_id: TransactionId::new(0),
info_hash,
peer_id,
bytes_downloaded: NumberOfBytes(0),
bytes_uploaded: NumberOfBytes(0),
bytes_left: NumberOfBytes(if seeder { 0 } else { 1 }),
event: AnnounceEvent::Started,
ip_address: None,
key: PeerKey(0),
peers_wanted: NumberOfPeers(peers_wanted as i32),
port: Port(peer_port),
bytes_downloaded: NumberOfBytes::new(0),
bytes_uploaded: NumberOfBytes::new(0),
bytes_left: NumberOfBytes::new(if seeder { 0 } else { 1 }),
event: AnnounceEvent::Started.into(),
ip_address: Ipv4AddrBytes([0; 4]),
key: PeerKey::new(0),
peers_wanted: NumberOfPeers::new(peers_wanted as i32),
port: Port::new(peer_port),
});
Ok(request_and_response(&socket, tracker_addr, request)?)
@ -79,7 +80,7 @@ pub fn scrape(
) -> anyhow::Result<ScrapeResponse> {
let request = Request::Scrape(ScrapeRequest {
connection_id,
transaction_id: TransactionId(0),
transaction_id: TransactionId::new(0),
info_hashes,
});

View file

@ -11,8 +11,8 @@ use std::{
use anyhow::Context;
use aquatic_udp::{common::BUFFER_SIZE, config::Config};
use aquatic_udp_protocol::{
common::PeerId, AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes,
NumberOfPeers, PeerKey, Port, Request, ScrapeRequest, TransactionId,
common::PeerId, AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, Ipv4AddrBytes,
NumberOfBytes, NumberOfPeers, PeerKey, Port, Request, ScrapeRequest, TransactionId,
};
#[test]
@ -40,22 +40,23 @@ fn test_invalid_connection_id() -> anyhow::Result<()> {
let announce_request = Request::Announce(AnnounceRequest {
connection_id: invalid_connection_id,
transaction_id: TransactionId(0),
action_placeholder: Default::default(),
transaction_id: TransactionId::new(0),
info_hash: InfoHash([0; 20]),
peer_id: PeerId([0; 20]),
bytes_downloaded: NumberOfBytes(0),
bytes_uploaded: NumberOfBytes(0),
bytes_left: NumberOfBytes(0),
event: AnnounceEvent::Started,
ip_address: None,
key: PeerKey(0),
peers_wanted: NumberOfPeers(10),
port: Port(1),
bytes_downloaded: NumberOfBytes::new(0),
bytes_uploaded: NumberOfBytes::new(0),
bytes_left: NumberOfBytes::new(0),
event: AnnounceEvent::Started.into(),
ip_address: Ipv4AddrBytes([0; 4]),
key: PeerKey::new(0),
peers_wanted: NumberOfPeers::new(10),
port: Port::new(1),
});
let scrape_request = Request::Scrape(ScrapeRequest {
connection_id: invalid_connection_id,
transaction_id: TransactionId(0),
transaction_id: TransactionId::new(0),
info_hashes: vec![InfoHash([0; 20])],
});

View file

@ -35,12 +35,6 @@ fn test_multiple_connect_announce_scrape() -> anyhow::Result<()> {
for i in 0..20 {
let is_seeder = i % 3 == 0;
if is_seeder {
num_seeders += 1;
} else {
num_leechers += 1;
}
let socket = UdpSocket::bind(peer_addr)?;
socket.set_read_timeout(Some(Duration::from_secs(1)))?;
@ -67,11 +61,11 @@ fn test_multiple_connect_announce_scrape() -> anyhow::Result<()> {
assert_eq!(announce_response.peers.len(), i.min(PEERS_WANTED));
assert_eq!(announce_response.seeders.0, num_seeders);
assert_eq!(announce_response.leechers.0, num_leechers);
assert_eq!(announce_response.fixed.seeders.0.get(), num_seeders);
assert_eq!(announce_response.fixed.leechers.0.get(), num_leechers);
let response_peer_ports: HashSet<u16, RandomState> =
HashSet::from_iter(announce_response.peers.iter().map(|p| p.port.0));
HashSet::from_iter(announce_response.peers.iter().map(|p| p.port.0.get()));
let expected_peer_ports: HashSet<u16, RandomState> =
HashSet::from_iter((0..i).map(|i| PEER_PORT_START + i as u16));
@ -81,6 +75,13 @@ fn test_multiple_connect_announce_scrape() -> anyhow::Result<()> {
assert_eq!(response_peer_ports, expected_peer_ports);
}
// Do this after announce is evaluated, since it is expected not to include announcing peer
if is_seeder {
num_seeders += 1;
} else {
num_leechers += 1;
}
let scrape_response = scrape(
&socket,
tracker_addr,
@ -89,10 +90,16 @@ fn test_multiple_connect_announce_scrape() -> anyhow::Result<()> {
)
.with_context(|| "scrape")?;
assert_eq!(scrape_response.torrent_stats[0].seeders.0, num_seeders);
assert_eq!(scrape_response.torrent_stats[0].leechers.0, num_leechers);
assert_eq!(scrape_response.torrent_stats[1].seeders.0, 0);
assert_eq!(scrape_response.torrent_stats[1].leechers.0, 0);
assert_eq!(
scrape_response.torrent_stats[0].seeders.0.get(),
num_seeders
);
assert_eq!(
scrape_response.torrent_stats[0].leechers.0.get(),
num_leechers
);
assert_eq!(scrape_response.torrent_stats[1].seeders.0.get(), 0);
assert_eq!(scrape_response.torrent_stats[1].leechers.0.get(), 0);
}
Ok(())

View file

@ -1,27 +0,0 @@
[package]
name = "aquatic_udp_bench"
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
readme.workspace = true
rust-version.workspace = true
[[bin]]
name = "aquatic_udp_bench"
[dependencies]
aquatic_common.workspace = true
aquatic_toml_config.workspace = true
aquatic_udp.workspace = true
aquatic_udp_protocol.workspace = true
anyhow = "1"
crossbeam-channel = "0.5"
indicatif = "0.17"
mimalloc = { version = "0.1", default-features = false }
num-format = "0.4"
rand_distr = "0.4"
rand = { version = "0.8", features = ["small_rng"] }
serde = { version = "1", features = ["derive"] }

View file

@ -1,115 +0,0 @@
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::time::{Duration, Instant};
use aquatic_common::CanonicalSocketAddr;
use crossbeam_channel::{Receiver, Sender};
use indicatif::ProgressIterator;
use rand::Rng;
use rand_distr::Gamma;
use aquatic_udp::common::*;
use aquatic_udp_protocol::*;
use crate::common::*;
use crate::config::BenchConfig;
pub fn bench_announce_handler(
bench_config: &BenchConfig,
request_sender: &Sender<(SocketWorkerIndex, ConnectedRequest, CanonicalSocketAddr)>,
response_receiver: &Receiver<(ConnectedResponse, CanonicalSocketAddr)>,
rng: &mut impl Rng,
info_hashes: &[InfoHash],
) -> (usize, Duration) {
let requests = create_requests(rng, info_hashes, bench_config.num_announce_requests);
let p = 10_000 * bench_config.num_threads; // FIXME: adjust to sharded workers
let mut num_responses = 0usize;
let mut dummy: u16 = rng.gen();
let pb = create_progress_bar("Announce", bench_config.num_rounds as u64);
// Start benchmark
let before = Instant::now();
for round in (0..bench_config.num_rounds).progress_with(pb) {
for request_chunk in requests.chunks(p) {
for (request, src) in request_chunk {
request_sender
.send((
SocketWorkerIndex(0),
ConnectedRequest::Announce(request.clone()),
*src,
))
.unwrap();
}
while let Ok((ConnectedResponse::AnnounceIpv4(r), _)) = response_receiver.try_recv() {
num_responses += 1;
if let Some(last_peer) = r.peers.last() {
dummy ^= last_peer.port.0;
}
}
}
let total = bench_config.num_announce_requests * (round + 1);
while num_responses < total {
if let Ok((ConnectedResponse::AnnounceIpv4(r), _)) = response_receiver.recv() {
num_responses += 1;
if let Some(last_peer) = r.peers.last() {
dummy ^= last_peer.port.0;
}
}
}
}
let elapsed = before.elapsed();
if dummy == 0 {
println!("dummy dummy");
}
(num_responses, elapsed)
}
pub fn create_requests(
rng: &mut impl Rng,
info_hashes: &[InfoHash],
number: usize,
) -> Vec<(AnnounceRequest, CanonicalSocketAddr)> {
let gamma = Gamma::new(GAMMA_SHAPE, GAMMA_SCALE).unwrap();
let max_index = info_hashes.len() - 1;
let mut requests = Vec::new();
for _ in 0..number {
let info_hash_index = gamma_usize(rng, gamma, max_index);
let request = AnnounceRequest {
connection_id: ConnectionId(0),
transaction_id: TransactionId(rng.gen()),
info_hash: info_hashes[info_hash_index],
peer_id: PeerId(rng.gen()),
bytes_downloaded: NumberOfBytes(rng.gen()),
bytes_uploaded: NumberOfBytes(rng.gen()),
bytes_left: NumberOfBytes(rng.gen()),
event: AnnounceEvent::Started,
ip_address: None,
key: PeerKey(rng.gen()),
peers_wanted: NumberOfPeers(rng.gen()),
port: Port(rng.gen()),
};
requests.push((
request,
CanonicalSocketAddr::new(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 1))),
));
}
requests
}

View file

@ -1,24 +0,0 @@
use indicatif::{ProgressBar, ProgressStyle};
use rand::Rng;
use rand_distr::Gamma;
pub const GAMMA_SHAPE: f64 = 0.2;
pub const GAMMA_SCALE: f64 = 100.0;
pub const NUM_INFO_HASHES: usize = 10_000;
pub fn create_progress_bar(name: &str, iterations: u64) -> ProgressBar {
let t = format!("{:<8} {}", name, "{wide_bar} {pos:>2}/{len:>2}");
let style = ProgressStyle::default_bar()
.template(&t)
.expect("setup indicatif template");
ProgressBar::new(iterations).with_style(style)
}
pub fn gamma_usize(rng: &mut impl Rng, gamma: Gamma<f64>, max: usize) -> usize {
let p: f64 = rng.sample(gamma);
let p = (p.min(101.0f64) - 1.0) / 100.0;
(p * max as f64) as usize
}

View file

@ -1,35 +0,0 @@
use aquatic_toml_config::TomlConfig;
use serde::Deserialize;
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct BenchConfig {
pub num_rounds: usize,
pub num_threads: usize,
pub num_connect_requests: usize,
pub num_announce_requests: usize,
pub num_scrape_requests: usize,
pub num_hashes_per_scrape_request: usize,
}
impl Default for BenchConfig {
fn default() -> Self {
Self {
num_rounds: 10,
num_threads: 2,
num_connect_requests: 5_000_000,
num_announce_requests: 2_000_000,
num_scrape_requests: 2_000_000,
num_hashes_per_scrape_request: 20,
}
}
}
impl aquatic_common::cli::Config for BenchConfig {}
#[cfg(test)]
mod tests {
use super::BenchConfig;
::aquatic_toml_config::gen_serialize_deserialize_test!(BenchConfig);
}

View file

@ -1,127 +0,0 @@
//! Benchmark announce and scrape handlers
//!
//! Example outputs:
//! ```
//! # Results over 10 rounds with 2 threads
//! Announce: 429 540 requests/second, 2328.07 ns/request
//! Scrape: 1 873 545 requests/second, 533.75 ns/request
//! ```
use aquatic_common::{PanicSentinelWatcher, ServerStartInstant};
use aquatic_udp::workers::swarm::run_swarm_worker;
use crossbeam_channel::unbounded;
use num_format::{Locale, ToFormattedString};
use rand::{rngs::SmallRng, thread_rng, Rng, SeedableRng};
use std::time::Duration;
use aquatic_common::cli::run_app_with_cli_and_config;
use aquatic_udp::common::*;
use aquatic_udp::config::Config;
use aquatic_udp_protocol::*;
use config::BenchConfig;
mod announce;
mod common;
mod config;
mod scrape;
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
fn main() {
run_app_with_cli_and_config::<BenchConfig>(
"aquatic_udp_bench: Run aquatic_udp benchmarks",
env!("CARGO_PKG_VERSION"),
run,
None,
)
}
pub fn run(bench_config: BenchConfig) -> ::anyhow::Result<()> {
// Setup common state, spawn request handlers
let mut aquatic_config = Config::default();
let (_, sentinel) = PanicSentinelWatcher::create_with_sentinel();
aquatic_config.cleaning.torrent_cleaning_interval = 60 * 60 * 24;
let (request_sender, request_receiver) = unbounded();
let (response_sender, response_receiver) = unbounded();
let response_sender = ConnectedResponseSender::new(vec![response_sender]);
let (statistics_sender, _) = unbounded();
let server_start_instant = ServerStartInstant::new();
{
let config = aquatic_config.clone();
let state = State::new(config.swarm_workers);
::std::thread::spawn(move || {
run_swarm_worker(
sentinel,
config,
state,
server_start_instant,
request_receiver,
response_sender,
statistics_sender,
SwarmWorkerIndex(0),
)
});
}
// Run benchmarks
let mut rng = SmallRng::from_rng(thread_rng()).unwrap();
let info_hashes = create_info_hashes(&mut rng);
let a = announce::bench_announce_handler(
&bench_config,
&request_sender,
&response_receiver,
&mut rng,
&info_hashes,
);
let s = scrape::bench_scrape_handler(
&bench_config,
&request_sender,
&response_receiver,
&mut rng,
&info_hashes,
);
println!(
"\n# Results over {} rounds with {} threads",
bench_config.num_rounds, bench_config.num_threads,
);
print_results("Announce:", a.0, a.1);
print_results("Scrape: ", s.0, s.1);
Ok(())
}
pub fn print_results(request_type: &str, num_responses: usize, duration: Duration) {
let per_second = ((num_responses as f64 / (duration.as_micros() as f64 / 1000000.0)) as usize)
.to_formatted_string(&Locale::se);
let time_per_request = duration.as_nanos() as f64 / (num_responses as f64);
println!(
"{} {:>10} requests/second, {:>8.2} ns/request",
request_type, per_second, time_per_request,
);
}
fn create_info_hashes(rng: &mut impl Rng) -> Vec<InfoHash> {
let mut info_hashes = Vec::new();
for _ in 0..common::NUM_INFO_HASHES {
info_hashes.push(InfoHash(rng.gen()));
}
info_hashes
}

View file

@ -1,123 +0,0 @@
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::time::{Duration, Instant};
use aquatic_common::CanonicalSocketAddr;
use crossbeam_channel::{Receiver, Sender};
use indicatif::ProgressIterator;
use rand::Rng;
use rand_distr::Gamma;
use aquatic_udp::common::*;
use aquatic_udp_protocol::*;
use crate::common::*;
use crate::config::BenchConfig;
pub fn bench_scrape_handler(
bench_config: &BenchConfig,
request_sender: &Sender<(SocketWorkerIndex, ConnectedRequest, CanonicalSocketAddr)>,
response_receiver: &Receiver<(ConnectedResponse, CanonicalSocketAddr)>,
rng: &mut impl Rng,
info_hashes: &[InfoHash],
) -> (usize, Duration) {
let requests = create_requests(
rng,
info_hashes,
bench_config.num_scrape_requests,
bench_config.num_hashes_per_scrape_request,
);
let p = 10_000 * bench_config.num_threads; // FIXME: adjust to sharded workers
let mut num_responses = 0usize;
let mut dummy: i32 = rng.gen();
let pb = create_progress_bar("Scrape", bench_config.num_rounds as u64);
// Start benchmark
let before = Instant::now();
for round in (0..bench_config.num_rounds).progress_with(pb) {
for request_chunk in requests.chunks(p) {
for (request, src) in request_chunk {
let request = ConnectedRequest::Scrape(PendingScrapeRequest {
slab_key: 0,
info_hashes: request
.info_hashes
.clone()
.into_iter()
.enumerate()
.collect(),
});
request_sender
.send((SocketWorkerIndex(0), request, *src))
.unwrap();
}
while let Ok((ConnectedResponse::Scrape(response), _)) = response_receiver.try_recv() {
num_responses += 1;
if let Some(stat) = response.torrent_stats.values().last() {
dummy ^= stat.leechers.0;
}
}
}
let total = bench_config.num_scrape_requests * (round + 1);
while num_responses < total {
if let Ok((ConnectedResponse::Scrape(response), _)) = response_receiver.recv() {
num_responses += 1;
if let Some(stat) = response.torrent_stats.values().last() {
dummy ^= stat.leechers.0;
}
}
}
}
let elapsed = before.elapsed();
if dummy == 0 {
println!("dummy dummy");
}
(num_responses, elapsed)
}
pub fn create_requests(
rng: &mut impl Rng,
info_hashes: &[InfoHash],
number: usize,
hashes_per_request: usize,
) -> Vec<(ScrapeRequest, CanonicalSocketAddr)> {
let gamma = Gamma::new(GAMMA_SHAPE, GAMMA_SCALE).unwrap();
let max_index = info_hashes.len() - 1;
let mut requests = Vec::new();
for _ in 0..number {
let mut request_info_hashes = Vec::new();
for _ in 0..hashes_per_request {
let info_hash_index = gamma_usize(rng, gamma, max_index);
request_info_hashes.push(info_hashes[info_hash_index])
}
let request = ScrapeRequest {
connection_id: ConnectionId(0),
transaction_id: TransactionId(rng.gen()),
info_hashes: request_info_hashes,
};
requests.push((
request,
CanonicalSocketAddr::new(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 1))),
));
}
requests
}

View file

@ -19,7 +19,7 @@ pub fn generate_info_hash() -> InfoHash {
}
pub fn generate_transaction_id(rng: &mut impl Rng) -> TransactionId {
TransactionId(rng.gen())
TransactionId::new(rng.gen())
}
pub fn create_connect_request(transaction_id: TransactionId) -> Request {

View file

@ -49,14 +49,14 @@ pub fn process_response(
rng,
info_hashes,
torrent_peers,
r.transaction_id,
r.fixed.transaction_id,
),
Response::AnnounceIpv6(r) => if_torrent_peer_move_and_create_random_request(
config,
rng,
info_hashes,
torrent_peers,
r.transaction_id,
r.fixed.transaction_id,
),
Response::Scrape(r) => if_torrent_peer_move_and_create_random_request(
config,
@ -143,24 +143,25 @@ fn create_announce_request(
) -> Request {
let (event, bytes_left) = {
if rng.gen_bool(config.requests.peer_seeder_probability) {
(AnnounceEvent::Completed, NumberOfBytes(0))
(AnnounceEvent::Completed, NumberOfBytes::new(0))
} else {
(AnnounceEvent::Started, NumberOfBytes(50))
(AnnounceEvent::Started, NumberOfBytes::new(50))
}
};
(AnnounceRequest {
connection_id: torrent_peer.connection_id,
action_placeholder: Default::default(),
transaction_id,
info_hash: torrent_peer.info_hash,
peer_id: torrent_peer.peer_id,
bytes_downloaded: NumberOfBytes(50),
bytes_uploaded: NumberOfBytes(50),
bytes_downloaded: NumberOfBytes::new(50),
bytes_uploaded: NumberOfBytes::new(50),
bytes_left,
event,
ip_address: None,
key: PeerKey(12345),
peers_wanted: NumberOfPeers(100),
event: event.into(),
ip_address: Ipv4AddrBytes([0; 4]),
key: PeerKey::new(12345),
peers_wanted: NumberOfPeers::new(100),
port: torrent_peer.port,
})
.into()
@ -209,7 +210,7 @@ fn create_torrent_peer(
scrape_hash_indeces,
connection_id,
peer_id: generate_peer_id(),
port: Port(rng.gen()),
port: Port::new(rng.gen()),
}
}

View file

@ -15,7 +15,9 @@ aquatic_peer_id.workspace = true
byteorder = "1"
either = "1"
zerocopy = { version = "0.7", features = ["derive"] }
[dev-dependencies]
pretty_assertions = "1"
quickcheck = "1"
quickcheck_macros = "1"

View file

@ -2,45 +2,174 @@ use std::fmt::Debug;
use std::net::{Ipv4Addr, Ipv6Addr};
pub use aquatic_peer_id::{PeerClient, PeerId};
use zerocopy::network_endian::{I32, I64, U16, U32};
use zerocopy::{AsBytes, FromBytes, FromZeroes};
pub trait Ip: Clone + Copy + Debug + PartialEq + Eq {}
pub trait Ip: Clone + Copy + Debug + PartialEq + Eq + AsBytes {}
impl Ip for Ipv4Addr {}
impl Ip for Ipv6Addr {}
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, AsBytes, FromBytes, FromZeroes)]
#[repr(transparent)]
pub struct AnnounceInterval(pub I32);
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct AnnounceInterval(pub i32);
impl AnnounceInterval {
pub fn new(v: i32) -> Self {
Self(I32::new(v))
}
}
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, AsBytes, FromBytes, FromZeroes)]
#[repr(transparent)]
pub struct InfoHash(pub [u8; 20]);
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct ConnectionId(pub i64);
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, AsBytes, FromBytes, FromZeroes)]
#[repr(transparent)]
pub struct ConnectionId(pub I64);
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct TransactionId(pub i32);
impl ConnectionId {
pub fn new(v: i64) -> Self {
Self(I64::new(v))
}
}
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct NumberOfBytes(pub i64);
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, AsBytes, FromBytes, FromZeroes)]
#[repr(transparent)]
pub struct TransactionId(pub I32);
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct NumberOfPeers(pub i32);
impl TransactionId {
pub fn new(v: i32) -> Self {
Self(I32::new(v))
}
}
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct NumberOfDownloads(pub i32);
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, AsBytes, FromBytes, FromZeroes)]
#[repr(transparent)]
pub struct NumberOfBytes(pub I64);
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct Port(pub u16);
impl NumberOfBytes {
pub fn new(v: i64) -> Self {
Self(I64::new(v))
}
}
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct PeerKey(pub u32);
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, AsBytes, FromBytes, FromZeroes)]
#[repr(transparent)]
pub struct NumberOfPeers(pub I32);
#[derive(PartialEq, Eq, Clone, Debug)]
impl NumberOfPeers {
pub fn new(v: i32) -> Self {
Self(I32::new(v))
}
}
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, AsBytes, FromBytes, FromZeroes)]
#[repr(transparent)]
pub struct NumberOfDownloads(pub I32);
impl NumberOfDownloads {
pub fn new(v: i32) -> Self {
Self(I32::new(v))
}
}
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, AsBytes, FromBytes, FromZeroes)]
#[repr(transparent)]
pub struct Port(pub U16);
impl Port {
pub fn new(v: u16) -> Self {
Self(U16::new(v))
}
}
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, AsBytes, FromBytes, FromZeroes)]
#[repr(transparent)]
pub struct PeerKey(pub I32);
impl PeerKey {
pub fn new(v: i32) -> Self {
Self(I32::new(v))
}
}
#[derive(PartialEq, Eq, Clone, Copy, Debug, AsBytes, FromBytes, FromZeroes)]
#[repr(C, packed)]
pub struct ResponsePeer<I: Ip> {
pub ip_address: I,
pub port: Port,
}
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, AsBytes, FromBytes, FromZeroes)]
#[repr(transparent)]
pub struct Ipv4AddrBytes(pub [u8; 4]);
impl Ip for Ipv4AddrBytes {}
impl Into<Ipv4Addr> for Ipv4AddrBytes {
fn into(self) -> Ipv4Addr {
Ipv4Addr::from(self.0)
}
}
impl Into<Ipv4AddrBytes> for Ipv4Addr {
fn into(self) -> Ipv4AddrBytes {
Ipv4AddrBytes(self.octets())
}
}
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, AsBytes, FromBytes, FromZeroes)]
#[repr(transparent)]
pub struct Ipv6AddrBytes(pub [u8; 16]);
impl Ip for Ipv6AddrBytes {}
impl Into<Ipv6Addr> for Ipv6AddrBytes {
fn into(self) -> Ipv6Addr {
Ipv6Addr::from(self.0)
}
}
impl Into<Ipv6AddrBytes> for Ipv6Addr {
fn into(self) -> Ipv6AddrBytes {
Ipv6AddrBytes(self.octets())
}
}
pub fn read_i32_ne(bytes: &mut impl ::std::io::Read) -> ::std::io::Result<I32> {
let mut tmp = [0u8; 4];
bytes.read_exact(&mut tmp)?;
Ok(I32::from_bytes(tmp))
}
pub fn read_i64_ne(bytes: &mut impl ::std::io::Read) -> ::std::io::Result<I64> {
let mut tmp = [0u8; 8];
bytes.read_exact(&mut tmp)?;
Ok(I64::from_bytes(tmp))
}
pub fn read_u16_ne(bytes: &mut impl ::std::io::Read) -> ::std::io::Result<U16> {
let mut tmp = [0u8; 2];
bytes.read_exact(&mut tmp)?;
Ok(U16::from_bytes(tmp))
}
pub fn read_u32_ne(bytes: &mut impl ::std::io::Read) -> ::std::io::Result<U32> {
let mut tmp = [0u8; 4];
bytes.read_exact(&mut tmp)?;
Ok(U32::from_bytes(tmp))
}
pub fn invalid_data() -> ::std::io::Error {
::std::io::Error::new(::std::io::ErrorKind::InvalidData, "invalid data")
}
#[cfg(test)]
impl quickcheck::Arbitrary for InfoHash {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {

View file

@ -1,9 +1,9 @@
use std::convert::TryInto;
use std::io::{self, Cursor, Read, Write};
use std::net::Ipv4Addr;
use std::io::{self, Cursor, Write};
use byteorder::{NetworkEndian, ReadBytesExt, WriteBytesExt};
use byteorder::{NetworkEndian, WriteBytesExt};
use either::Either;
use zerocopy::FromZeroes;
use zerocopy::{byteorder::network_endian::I32, AsBytes, FromBytes};
use aquatic_peer_id::PeerId;
@ -11,103 +11,6 @@ use super::common::*;
const PROTOCOL_IDENTIFIER: i64 = 4_497_486_125_440;
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub enum AnnounceEvent {
Started,
Stopped,
Completed,
None,
}
impl AnnounceEvent {
#[inline]
pub fn from_i32(i: i32) -> Self {
match i {
1 => Self::Completed,
2 => Self::Started,
3 => Self::Stopped,
_ => Self::None,
}
}
#[inline]
pub fn to_i32(&self) -> i32 {
match self {
AnnounceEvent::None => 0,
AnnounceEvent::Completed => 1,
AnnounceEvent::Started => 2,
AnnounceEvent::Stopped => 3,
}
}
}
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ConnectRequest {
pub transaction_id: TransactionId,
}
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct AnnounceRequest {
pub connection_id: ConnectionId,
pub transaction_id: TransactionId,
pub info_hash: InfoHash,
pub peer_id: PeerId,
pub bytes_downloaded: NumberOfBytes,
pub bytes_uploaded: NumberOfBytes,
pub bytes_left: NumberOfBytes,
pub event: AnnounceEvent,
pub ip_address: Option<Ipv4Addr>,
pub key: PeerKey,
pub peers_wanted: NumberOfPeers,
pub port: Port,
}
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ScrapeRequest {
pub connection_id: ConnectionId,
pub transaction_id: TransactionId,
pub info_hashes: Vec<InfoHash>,
}
#[derive(Debug)]
pub enum RequestParseError {
Sendable {
connection_id: ConnectionId,
transaction_id: TransactionId,
err: Either<io::Error, &'static str>,
},
Unsendable {
err: Either<io::Error, &'static str>,
},
}
impl RequestParseError {
pub fn sendable_io(err: io::Error, connection_id: i64, transaction_id: i32) -> Self {
Self::Sendable {
connection_id: ConnectionId(connection_id),
transaction_id: TransactionId(transaction_id),
err: Either::Left(err),
}
}
pub fn sendable_text(text: &'static str, connection_id: i64, transaction_id: i32) -> Self {
Self::Sendable {
connection_id: ConnectionId(connection_id),
transaction_id: TransactionId(transaction_id),
err: Either::Right(text),
}
}
pub fn unsendable_io(err: io::Error) -> Self {
Self::Unsendable {
err: Either::Left(err),
}
}
pub fn unsendable_text(text: &'static str) -> Self {
Self::Unsendable {
err: Either::Right(text),
}
}
}
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum Request {
Connect(ConnectRequest),
@ -115,6 +18,115 @@ pub enum Request {
Scrape(ScrapeRequest),
}
impl Request {
pub fn write(self, bytes: &mut impl Write) -> Result<(), io::Error> {
match self {
Request::Connect(r) => {
bytes.write_i64::<NetworkEndian>(PROTOCOL_IDENTIFIER)?;
bytes.write_i32::<NetworkEndian>(0)?;
bytes.write_all(r.transaction_id.as_bytes())?;
}
Request::Announce(r) => {
bytes.write_all(r.as_bytes())?;
}
Request::Scrape(r) => {
bytes.write_all(r.connection_id.as_bytes())?;
bytes.write_i32::<NetworkEndian>(2)?;
bytes.write_all(r.transaction_id.as_bytes())?;
bytes.write_all((*r.info_hashes.as_slice()).as_bytes())?;
}
}
Ok(())
}
pub fn from_bytes(bytes: &[u8], max_scrape_torrents: u8) -> Result<Self, RequestParseError> {
let action = bytes
.get(8..12)
.map(|bytes| I32::from_bytes(bytes.try_into().unwrap()))
.ok_or_else(|| RequestParseError::unsendable_text("Couldn't parse action"))?;
match action.get() {
// Connect
0 => {
let mut bytes = Cursor::new(bytes);
let protocol_identifier =
read_i64_ne(&mut bytes).map_err(RequestParseError::unsendable_io)?;
let _action = read_i32_ne(&mut bytes).map_err(RequestParseError::unsendable_io)?;
let transaction_id = read_i32_ne(&mut bytes)
.map(TransactionId)
.map_err(RequestParseError::unsendable_io)?;
if protocol_identifier.get() == PROTOCOL_IDENTIFIER {
Ok((ConnectRequest { transaction_id }).into())
} else {
Err(RequestParseError::unsendable_text(
"Protocol identifier missing",
))
}
}
// Announce
1 => {
let request = AnnounceRequest::read_from_prefix(bytes)
.ok_or_else(|| RequestParseError::unsendable_text("invalid data"))?;
// Make sure not to create AnnounceEventBytes with invalid value
if matches!(request.event.0.get(), (0..=3)) {
Ok(Request::Announce(request))
} else {
Err(RequestParseError::sendable_text(
"Invalid announce event",
request.connection_id,
request.transaction_id,
))
}
}
// Scrape
2 => {
let mut bytes = Cursor::new(bytes);
let connection_id = read_i64_ne(&mut bytes)
.map(ConnectionId)
.map_err(RequestParseError::unsendable_io)?;
let _action = read_i32_ne(&mut bytes).map_err(RequestParseError::unsendable_io)?;
let transaction_id = read_i32_ne(&mut bytes)
.map(TransactionId)
.map_err(RequestParseError::unsendable_io)?;
let remaining_bytes = {
let position = bytes.position() as usize;
let inner = bytes.into_inner();
&inner[position..]
};
let info_hashes = FromBytes::slice_from(remaining_bytes).ok_or_else(|| {
RequestParseError::sendable_text(
"Invalid info hash list. Note that full scrapes are not allowed",
connection_id,
transaction_id,
)
})?;
let info_hashes = Vec::from(
&info_hashes[..(max_scrape_torrents as usize).min(info_hashes.len())],
);
Ok((ScrapeRequest {
connection_id,
transaction_id,
info_hashes,
})
.into())
}
_ => Err(RequestParseError::unsendable_text("Invalid action")),
}
}
}
impl From<ConnectRequest> for Request {
fn from(r: ConnectRequest) -> Self {
Self::Connect(r)
@ -133,173 +145,116 @@ impl From<ScrapeRequest> for Request {
}
}
impl Request {
pub fn write(self, bytes: &mut impl Write) -> Result<(), io::Error> {
match self {
Request::Connect(r) => {
bytes.write_i64::<NetworkEndian>(PROTOCOL_IDENTIFIER)?;
bytes.write_i32::<NetworkEndian>(0)?;
bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;
}
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ConnectRequest {
pub transaction_id: TransactionId,
}
Request::Announce(r) => {
bytes.write_i64::<NetworkEndian>(r.connection_id.0)?;
bytes.write_i32::<NetworkEndian>(1)?;
bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;
#[derive(PartialEq, Eq, Clone, Debug, AsBytes, FromBytes, FromZeroes)]
#[repr(C, packed)]
pub struct AnnounceRequest {
pub connection_id: ConnectionId,
/// This field is only present to enable zero-copy serialization and
/// deserialization.
pub action_placeholder: AnnounceActionPlaceholder,
pub transaction_id: TransactionId,
pub info_hash: InfoHash,
pub peer_id: PeerId,
pub bytes_downloaded: NumberOfBytes,
pub bytes_left: NumberOfBytes,
pub bytes_uploaded: NumberOfBytes,
pub event: AnnounceEventBytes,
pub ip_address: Ipv4AddrBytes,
pub key: PeerKey,
pub peers_wanted: NumberOfPeers,
pub port: Port,
}
bytes.write_all(&r.info_hash.0)?;
bytes.write_all(&r.peer_id.0)?;
/// Placeholder for the action field inside [`AnnounceRequest`], present only
/// so the struct matches the wire layout for zero-copy (de)serialization.
///
/// Note: Request::from_bytes only creates this struct with value 1
#[derive(PartialEq, Eq, Clone, Copy, Debug, AsBytes, FromBytes, FromZeroes)]
#[repr(transparent)]
pub struct AnnounceActionPlaceholder(I32);
bytes.write_i64::<NetworkEndian>(r.bytes_downloaded.0)?;
bytes.write_i64::<NetworkEndian>(r.bytes_left.0)?;
bytes.write_i64::<NetworkEndian>(r.bytes_uploaded.0)?;
bytes.write_i32::<NetworkEndian>(r.event.to_i32())?;
bytes.write_all(&r.ip_address.map_or([0; 4], |ip| ip.octets()))?;
bytes.write_u32::<NetworkEndian>(r.key.0)?;
bytes.write_i32::<NetworkEndian>(r.peers_wanted.0)?;
bytes.write_u16::<NetworkEndian>(r.port.0)?;
}
Request::Scrape(r) => {
bytes.write_i64::<NetworkEndian>(r.connection_id.0)?;
bytes.write_i32::<NetworkEndian>(2)?;
bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;
for info_hash in r.info_hashes {
bytes.write_all(&info_hash.0)?;
}
}
}
Ok(())
impl Default for AnnounceActionPlaceholder {
    /// The announce action is identified by value 1 on the wire.
    fn default() -> Self {
        Self(I32::new(1))
    }
}
pub fn from_bytes(bytes: &[u8], max_scrape_torrents: u8) -> Result<Self, RequestParseError> {
let mut cursor = Cursor::new(bytes);
/// Raw on-wire representation of an announce event (network-endian i32).
///
/// Note: Request::from_bytes only creates this struct with values 0..=3
#[derive(PartialEq, Eq, Clone, Copy, Debug, AsBytes, FromBytes, FromZeroes)]
#[repr(transparent)]
pub struct AnnounceEventBytes(I32);
let connection_id = cursor
.read_i64::<NetworkEndian>()
.map_err(RequestParseError::unsendable_io)?;
let action = cursor
.read_i32::<NetworkEndian>()
.map_err(RequestParseError::unsendable_io)?;
let transaction_id = cursor
.read_i32::<NetworkEndian>()
.map_err(RequestParseError::unsendable_io)?;
impl From<AnnounceEvent> for AnnounceEventBytes {
    // Encoding follows BEP 15: 0 = none, 1 = completed, 2 = started, 3 = stopped
    fn from(value: AnnounceEvent) -> Self {
        Self(I32::new(match value {
            AnnounceEvent::None => 0,
            AnnounceEvent::Completed => 1,
            AnnounceEvent::Started => 2,
            AnnounceEvent::Stopped => 3,
        }))
    }
}
match action {
// Connect
0 => {
if connection_id == PROTOCOL_IDENTIFIER {
Ok((ConnectRequest {
transaction_id: TransactionId(transaction_id),
})
.into())
} else {
Err(RequestParseError::unsendable_text(
"Protocol identifier missing",
))
}
}
/// Event reported by a peer in an announce request.
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub enum AnnounceEvent {
    Started,
    Stopped,
    Completed,
    None,
}
// Announce
1 => {
let mut info_hash = [0; 20];
let mut peer_id = [0; 20];
let mut ip = [0; 4];
impl From<AnnounceEventBytes> for AnnounceEvent {
    fn from(value: AnnounceEventBytes) -> Self {
        match value.0.get() {
            1 => Self::Completed,
            2 => Self::Started,
            3 => Self::Stopped,
            // Any other value (including the canonical 0) maps to None, so
            // unknown event codes are tolerated rather than rejected
            _ => Self::None,
        }
    }
}
cursor.read_exact(&mut info_hash).map_err(|err| {
RequestParseError::sendable_io(err, connection_id, transaction_id)
})?;
cursor.read_exact(&mut peer_id).map_err(|err| {
RequestParseError::sendable_io(err, connection_id, transaction_id)
})?;
/// UDP tracker scrape request (action 2).
///
/// Carries a variable-length list of info hashes, so unlike the announce
/// request it cannot be a fixed-layout zerocopy struct.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ScrapeRequest {
    pub connection_id: ConnectionId,
    pub transaction_id: TransactionId,
    pub info_hashes: Vec<InfoHash>,
}
let bytes_downloaded = cursor.read_i64::<NetworkEndian>().map_err(|err| {
RequestParseError::sendable_io(err, connection_id, transaction_id)
})?;
let bytes_left = cursor.read_i64::<NetworkEndian>().map_err(|err| {
RequestParseError::sendable_io(err, connection_id, transaction_id)
})?;
let bytes_uploaded = cursor.read_i64::<NetworkEndian>().map_err(|err| {
RequestParseError::sendable_io(err, connection_id, transaction_id)
})?;
let event = cursor.read_i32::<NetworkEndian>().map_err(|err| {
RequestParseError::sendable_io(err, connection_id, transaction_id)
})?;
/// Error produced while parsing a request.
#[derive(Debug)]
pub enum RequestParseError {
    /// The connection and transaction ids were parsed, so an error response
    /// can be sent back to the client.
    Sendable {
        connection_id: ConnectionId,
        transaction_id: TransactionId,
        err: &'static str,
    },
    /// Parsing failed before the ids needed for an error response were
    /// available; the error can only be logged, not sent to the client.
    Unsendable {
        err: Either<io::Error, &'static str>,
    },
}
cursor.read_exact(&mut ip).map_err(|err| {
RequestParseError::sendable_io(err, connection_id, transaction_id)
})?;
let key = cursor.read_u32::<NetworkEndian>().map_err(|err| {
RequestParseError::sendable_io(err, connection_id, transaction_id)
})?;
let peers_wanted = cursor.read_i32::<NetworkEndian>().map_err(|err| {
RequestParseError::sendable_io(err, connection_id, transaction_id)
})?;
let port = cursor.read_u16::<NetworkEndian>().map_err(|err| {
RequestParseError::sendable_io(err, connection_id, transaction_id)
})?;
let opt_ip = if ip == [0; 4] {
None
} else {
Some(Ipv4Addr::from(ip))
};
Ok((AnnounceRequest {
connection_id: ConnectionId(connection_id),
transaction_id: TransactionId(transaction_id),
info_hash: InfoHash(info_hash),
peer_id: PeerId(peer_id),
bytes_downloaded: NumberOfBytes(bytes_downloaded),
bytes_uploaded: NumberOfBytes(bytes_uploaded),
bytes_left: NumberOfBytes(bytes_left),
event: AnnounceEvent::from_i32(event),
ip_address: opt_ip,
key: PeerKey(key),
peers_wanted: NumberOfPeers(peers_wanted),
port: Port(port),
})
.into())
}
// Scrape
2 => {
let position = cursor.position() as usize;
let inner = cursor.into_inner();
let info_hashes: Vec<InfoHash> = (&inner[position..])
.chunks_exact(20)
.take(max_scrape_torrents as usize)
.map(|chunk| InfoHash(chunk.try_into().unwrap()))
.collect();
if info_hashes.is_empty() {
Err(RequestParseError::sendable_text(
"Full scrapes are not allowed",
connection_id,
transaction_id,
))
} else {
Ok((ScrapeRequest {
connection_id: ConnectionId(connection_id),
transaction_id: TransactionId(transaction_id),
info_hashes,
})
.into())
}
}
_ => Err(RequestParseError::sendable_text(
"Invalid action",
connection_id,
transaction_id,
)),
impl RequestParseError {
    /// Build an error that can be reported back to the client, tagged with
    /// the ids required for an error response.
    pub fn sendable_text(
        text: &'static str,
        connection_id: ConnectionId,
        transaction_id: TransactionId,
    ) -> Self {
        Self::Sendable {
            connection_id,
            transaction_id,
            err: text,
        }
    }
    /// Wrap an I/O error that occurred before enough of the request was
    /// parsed to respond to the client.
    pub fn unsendable_io(err: io::Error) -> Self {
        Self::Unsendable {
            err: Either::Left(err),
        }
    }
    /// Wrap a static message for a failure that cannot be reported back to
    /// the client.
    pub fn unsendable_text(text: &'static str) -> Self {
        Self::Unsendable {
            err: Either::Right(text),
        }
    }
}
@ -308,6 +263,7 @@ impl Request {
mod tests {
use quickcheck::TestResult;
use quickcheck_macros::quickcheck;
use zerocopy::network_endian::{I32, I64, U16};
use super::*;
@ -325,7 +281,7 @@ mod tests {
impl quickcheck::Arbitrary for ConnectRequest {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
Self {
transaction_id: TransactionId(i32::arbitrary(g)),
transaction_id: TransactionId(I32::new(i32::arbitrary(g))),
}
}
}
@ -333,18 +289,19 @@ mod tests {
impl quickcheck::Arbitrary for AnnounceRequest {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
Self {
connection_id: ConnectionId(i64::arbitrary(g)),
transaction_id: TransactionId(i32::arbitrary(g)),
connection_id: ConnectionId(I64::new(i64::arbitrary(g))),
action_placeholder: AnnounceActionPlaceholder::default(),
transaction_id: TransactionId(I32::new(i32::arbitrary(g))),
info_hash: InfoHash::arbitrary(g),
peer_id: PeerId::arbitrary(g),
bytes_downloaded: NumberOfBytes(i64::arbitrary(g)),
bytes_uploaded: NumberOfBytes(i64::arbitrary(g)),
bytes_left: NumberOfBytes(i64::arbitrary(g)),
event: AnnounceEvent::arbitrary(g),
ip_address: None,
key: PeerKey(u32::arbitrary(g)),
peers_wanted: NumberOfPeers(i32::arbitrary(g)),
port: Port(u16::arbitrary(g)),
bytes_downloaded: NumberOfBytes(I64::new(i64::arbitrary(g))),
bytes_uploaded: NumberOfBytes(I64::new(i64::arbitrary(g))),
bytes_left: NumberOfBytes(I64::new(i64::arbitrary(g))),
event: AnnounceEvent::arbitrary(g).into(),
ip_address: Ipv4AddrBytes::arbitrary(g),
key: PeerKey::new(i32::arbitrary(g)),
peers_wanted: NumberOfPeers(I32::new(i32::arbitrary(g))),
port: Port(U16::new(u16::arbitrary(g))),
}
}
}
@ -356,8 +313,8 @@ mod tests {
.collect();
Self {
connection_id: ConnectionId(i64::arbitrary(g)),
transaction_id: TransactionId(i32::arbitrary(g)),
connection_id: ConnectionId(I64::new(i64::arbitrary(g))),
transaction_id: TransactionId(I32::new(i32::arbitrary(g))),
info_hashes,
}
}
@ -372,7 +329,7 @@ mod tests {
let success = request == r2;
if !success {
println!("before: {:#?}\nafter: {:#?}", request, r2);
::pretty_assertions::assert_eq!(request, r2);
}
success

View file

@ -1,69 +1,117 @@
use std::borrow::Cow;
use std::convert::TryInto;
use std::io::{self, Cursor, Write};
use std::net::{Ipv4Addr, Ipv6Addr};
use std::io::{self, Write};
use std::mem::size_of;
use byteorder::{NetworkEndian, ReadBytesExt, WriteBytesExt};
use byteorder::{NetworkEndian, WriteBytesExt};
use zerocopy::{AsBytes, FromBytes, FromZeroes};
use super::common::*;
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
pub struct TorrentScrapeStatistics {
pub seeders: NumberOfPeers,
pub completed: NumberOfDownloads,
pub leechers: NumberOfPeers,
}
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ConnectResponse {
pub connection_id: ConnectionId,
pub transaction_id: TransactionId,
}
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct AnnounceResponse<I: Ip> {
pub transaction_id: TransactionId,
pub announce_interval: AnnounceInterval,
pub leechers: NumberOfPeers,
pub seeders: NumberOfPeers,
pub peers: Vec<ResponsePeer<I>>,
}
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ScrapeResponse {
pub transaction_id: TransactionId,
pub torrent_stats: Vec<TorrentScrapeStatistics>,
}
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ErrorResponse {
pub transaction_id: TransactionId,
pub message: Cow<'static, str>,
}
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum Response {
Connect(ConnectResponse),
AnnounceIpv4(AnnounceResponse<Ipv4Addr>),
AnnounceIpv6(AnnounceResponse<Ipv6Addr>),
AnnounceIpv4(AnnounceResponse<Ipv4AddrBytes>),
AnnounceIpv6(AnnounceResponse<Ipv6AddrBytes>),
Scrape(ScrapeResponse),
Error(ErrorResponse),
}
impl Response {
#[inline]
pub fn write(&self, bytes: &mut impl Write) -> Result<(), io::Error> {
match self {
Response::Connect(r) => r.write(bytes),
Response::AnnounceIpv4(r) => r.write(bytes),
Response::AnnounceIpv6(r) => r.write(bytes),
Response::Scrape(r) => r.write(bytes),
Response::Error(r) => r.write(bytes),
}
}
/// Parse a response from a packet payload.
///
/// `ipv4` selects the peer list layout for announce responses: 4-byte
/// addresses when true, 16-byte addresses when false. Returns an error on
/// truncated payloads, trailing partial peer/stat entries, or an
/// unrecognized action.
#[inline]
pub fn from_bytes(mut bytes: &[u8], ipv4: bool) -> Result<Self, io::Error> {
    // The leading network-endian i32 identifies the response type
    let action = read_i32_ne(&mut bytes)?;

    match action.get() {
        // Connect
        0 => Ok(Response::Connect(
            ConnectResponse::read_from_prefix(bytes).ok_or_else(invalid_data)?,
        )),
        // Announce
        1 if ipv4 => {
            let fixed =
                AnnounceResponseFixedData::read_from_prefix(bytes).ok_or_else(invalid_data)?;

            // Everything after the fixed-size header is the peer list;
            // slice_from fails if the remainder is not a whole number of peers
            let peers = if let Some(bytes) = bytes.get(size_of::<AnnounceResponseFixedData>()..)
            {
                Vec::from(
                    ResponsePeer::<Ipv4AddrBytes>::slice_from(bytes)
                        .ok_or_else(invalid_data)?,
                )
            } else {
                Vec::new()
            };

            Ok(Response::AnnounceIpv4(AnnounceResponse { fixed, peers }))
        }
        1 if !ipv4 => {
            let fixed =
                AnnounceResponseFixedData::read_from_prefix(bytes).ok_or_else(invalid_data)?;

            let peers = if let Some(bytes) = bytes.get(size_of::<AnnounceResponseFixedData>()..)
            {
                Vec::from(
                    ResponsePeer::<Ipv6AddrBytes>::slice_from(bytes)
                        .ok_or_else(invalid_data)?,
                )
            } else {
                Vec::new()
            };

            Ok(Response::AnnounceIpv6(AnnounceResponse { fixed, peers }))
        }
        // Scrape
        2 => {
            let transaction_id = read_i32_ne(&mut bytes).map(TransactionId)?;

            let torrent_stats =
                Vec::from(TorrentScrapeStatistics::slice_from(bytes).ok_or_else(invalid_data)?);

            Ok((ScrapeResponse {
                transaction_id,
                torrent_stats,
            })
            .into())
        }
        // Error
        3 => {
            let transaction_id = read_i32_ne(&mut bytes).map(TransactionId)?;

            // Remaining bytes are the human-readable message; lossy decoding
            // tolerates invalid UTF-8
            let message = String::from_utf8_lossy(&bytes).into_owned().into();

            Ok((ErrorResponse {
                transaction_id,
                message,
            })
            .into())
        }
        _ => Err(invalid_data()),
    }
}
}
impl From<ConnectResponse> for Response {
fn from(r: ConnectResponse) -> Self {
Self::Connect(r)
}
}
impl From<AnnounceResponse<Ipv4Addr>> for Response {
fn from(r: AnnounceResponse<Ipv4Addr>) -> Self {
impl From<AnnounceResponse<Ipv4AddrBytes>> for Response {
fn from(r: AnnounceResponse<Ipv4AddrBytes>) -> Self {
Self::AnnounceIpv4(r)
}
}
impl From<AnnounceResponse<Ipv6Addr>> for Response {
fn from(r: AnnounceResponse<Ipv6Addr>) -> Self {
impl From<AnnounceResponse<Ipv6AddrBytes>> for Response {
fn from(r: AnnounceResponse<Ipv6AddrBytes>) -> Self {
Self::AnnounceIpv6(r)
}
}
@ -80,203 +128,135 @@ impl From<ErrorResponse> for Response {
}
}
impl Response {
/// UDP tracker connect response (action 0).
///
/// `#[repr(C, packed)]` field order matches the wire layout: the transaction
/// id precedes the newly issued connection id.
#[derive(PartialEq, Eq, Clone, Debug, AsBytes, FromBytes, FromZeroes)]
#[repr(C, packed)]
pub struct ConnectResponse {
    pub transaction_id: TransactionId,
    pub connection_id: ConnectionId,
}
impl ConnectResponse {
#[inline]
pub fn write(&self, bytes: &mut impl Write) -> Result<(), io::Error> {
match self {
Response::Connect(r) => {
bytes.write_i32::<NetworkEndian>(0)?;
bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;
bytes.write_i64::<NetworkEndian>(r.connection_id.0)?;
}
Response::AnnounceIpv4(r) => {
bytes.write_i32::<NetworkEndian>(1)?;
bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;
bytes.write_i32::<NetworkEndian>(r.announce_interval.0)?;
bytes.write_i32::<NetworkEndian>(r.leechers.0)?;
bytes.write_i32::<NetworkEndian>(r.seeders.0)?;
for peer in r.peers.iter() {
bytes.write_all(&peer.ip_address.octets())?;
bytes.write_u16::<NetworkEndian>(peer.port.0)?;
}
}
Response::AnnounceIpv6(r) => {
bytes.write_i32::<NetworkEndian>(1)?;
bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;
bytes.write_i32::<NetworkEndian>(r.announce_interval.0)?;
bytes.write_i32::<NetworkEndian>(r.leechers.0)?;
bytes.write_i32::<NetworkEndian>(r.seeders.0)?;
for peer in r.peers.iter() {
bytes.write_all(&peer.ip_address.octets())?;
bytes.write_u16::<NetworkEndian>(peer.port.0)?;
}
}
Response::Scrape(r) => {
bytes.write_i32::<NetworkEndian>(2)?;
bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;
for torrent_stat in r.torrent_stats.iter() {
bytes.write_i32::<NetworkEndian>(torrent_stat.seeders.0)?;
bytes.write_i32::<NetworkEndian>(torrent_stat.completed.0)?;
bytes.write_i32::<NetworkEndian>(torrent_stat.leechers.0)?;
}
}
Response::Error(r) => {
bytes.write_i32::<NetworkEndian>(3)?;
bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;
bytes.write_all(r.message.as_bytes())?;
}
}
bytes.write_i32::<NetworkEndian>(0)?;
bytes.write_all(self.as_bytes())?;
Ok(())
}
}
/// UDP tracker announce response (action 1), generic over the peer address
/// type (IPv4 or IPv6 byte representation).
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct AnnounceResponse<I: Ip> {
    // Fixed-size wire header preceding the variable-length peer list
    pub fixed: AnnounceResponseFixedData,
    pub peers: Vec<ResponsePeer<I>>,
}
impl<I: Ip> AnnounceResponse<I> {
pub fn empty() -> Self {
Self {
fixed: FromZeroes::new_zeroed(),
peers: Default::default(),
}
}
#[inline]
pub fn from_bytes(bytes: &[u8], ipv4: bool) -> Result<Self, io::Error> {
let mut cursor = Cursor::new(bytes);
pub fn write(&self, bytes: &mut impl Write) -> Result<(), io::Error> {
bytes.write_i32::<NetworkEndian>(1)?;
bytes.write_all(self.fixed.as_bytes())?;
bytes.write_all((*self.peers.as_slice()).as_bytes())?;
let action = cursor.read_i32::<NetworkEndian>()?;
let transaction_id = cursor.read_i32::<NetworkEndian>()?;
Ok(())
}
}
match action {
// Connect
0 => {
let connection_id = cursor.read_i64::<NetworkEndian>()?;
/// Fixed-size portion of an announce response, laid out to match the wire
/// format for zero-copy (de)serialization; the peer list follows it.
#[derive(PartialEq, Eq, Clone, Debug, AsBytes, FromBytes, FromZeroes)]
#[repr(C, packed)]
pub struct AnnounceResponseFixedData {
    pub transaction_id: TransactionId,
    pub announce_interval: AnnounceInterval,
    pub leechers: NumberOfPeers,
    pub seeders: NumberOfPeers,
}
Ok((ConnectResponse {
connection_id: ConnectionId(connection_id),
transaction_id: TransactionId(transaction_id),
})
.into())
}
// Announce
1 if ipv4 => {
let announce_interval = cursor.read_i32::<NetworkEndian>()?;
let leechers = cursor.read_i32::<NetworkEndian>()?;
let seeders = cursor.read_i32::<NetworkEndian>()?;
/// UDP tracker scrape response (action 2) with per-torrent statistics in the
/// same order as the requested info hashes.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ScrapeResponse {
    pub transaction_id: TransactionId,
    pub torrent_stats: Vec<TorrentScrapeStatistics>,
}
let position = cursor.position() as usize;
let inner = cursor.into_inner();
impl ScrapeResponse {
#[inline]
pub fn write(&self, bytes: &mut impl Write) -> Result<(), io::Error> {
bytes.write_i32::<NetworkEndian>(2)?;
bytes.write_all(self.transaction_id.as_bytes())?;
bytes.write_all((*self.torrent_stats.as_slice()).as_bytes())?;
let peers = inner[position..]
.chunks_exact(6)
.map(|chunk| {
let ip_bytes: [u8; 4] = (&chunk[..4]).try_into().unwrap();
let ip_address = Ipv4Addr::from(ip_bytes);
let port = (&chunk[4..]).read_u16::<NetworkEndian>().unwrap();
Ok(())
}
}
ResponsePeer {
ip_address,
port: Port(port),
}
})
.collect();
/// Per-torrent statistics entry in a scrape response; wire layout is three
/// consecutive network-endian i32 counters.
#[derive(PartialEq, Eq, Debug, Copy, Clone, AsBytes, FromBytes, FromZeroes)]
#[repr(C, packed)]
pub struct TorrentScrapeStatistics {
    pub seeders: NumberOfPeers,
    // Number of completed downloads ("snatches")
    pub completed: NumberOfDownloads,
    pub leechers: NumberOfPeers,
}
Ok((AnnounceResponse {
transaction_id: TransactionId(transaction_id),
announce_interval: AnnounceInterval(announce_interval),
leechers: NumberOfPeers(leechers),
seeders: NumberOfPeers(seeders),
peers,
})
.into())
}
1 if !ipv4 => {
let announce_interval = cursor.read_i32::<NetworkEndian>()?;
let leechers = cursor.read_i32::<NetworkEndian>()?;
let seeders = cursor.read_i32::<NetworkEndian>()?;
/// UDP tracker error response (action 3) with a human-readable message.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ErrorResponse {
    pub transaction_id: TransactionId,
    // Cow allows both static error strings and messages parsed off the wire
    pub message: Cow<'static, str>,
}
let position = cursor.position() as usize;
let inner = cursor.into_inner();
impl ErrorResponse {
#[inline]
pub fn write(&self, bytes: &mut impl Write) -> Result<(), io::Error> {
bytes.write_i32::<NetworkEndian>(3)?;
bytes.write_all(self.transaction_id.as_bytes())?;
bytes.write_all(self.message.as_bytes())?;
let peers = inner[position..]
.chunks_exact(18)
.map(|chunk| {
let ip_bytes: [u8; 16] = (&chunk[..16]).try_into().unwrap();
let ip_address = Ipv6Addr::from(ip_bytes);
let port = (&chunk[16..]).read_u16::<NetworkEndian>().unwrap();
ResponsePeer {
ip_address,
port: Port(port),
}
})
.collect();
Ok((AnnounceResponse {
transaction_id: TransactionId(transaction_id),
announce_interval: AnnounceInterval(announce_interval),
leechers: NumberOfPeers(leechers),
seeders: NumberOfPeers(seeders),
peers,
})
.into())
}
// Scrape
2 => {
let position = cursor.position() as usize;
let inner = cursor.into_inner();
let stats = inner[position..]
.chunks_exact(12)
.map(|chunk| {
let mut cursor: Cursor<&[u8]> = Cursor::new(&chunk[..]);
let seeders = cursor.read_i32::<NetworkEndian>().unwrap();
let downloads = cursor.read_i32::<NetworkEndian>().unwrap();
let leechers = cursor.read_i32::<NetworkEndian>().unwrap();
TorrentScrapeStatistics {
seeders: NumberOfPeers(seeders),
completed: NumberOfDownloads(downloads),
leechers: NumberOfPeers(leechers),
}
})
.collect();
Ok((ScrapeResponse {
transaction_id: TransactionId(transaction_id),
torrent_stats: stats,
})
.into())
}
// Error
3 => {
let position = cursor.position() as usize;
let inner = cursor.into_inner();
Ok((ErrorResponse {
transaction_id: TransactionId(transaction_id),
message: String::from_utf8_lossy(&inner[position..])
.into_owned()
.into(),
})
.into())
}
_ => Ok((ErrorResponse {
transaction_id: TransactionId(transaction_id),
message: "Invalid action".into(),
})
.into()),
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use quickcheck_macros::quickcheck;
use zerocopy::network_endian::I32;
use zerocopy::network_endian::I64;
use super::*;
impl quickcheck::Arbitrary for Ipv4AddrBytes {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
Self([
u8::arbitrary(g),
u8::arbitrary(g),
u8::arbitrary(g),
u8::arbitrary(g),
])
}
}
impl quickcheck::Arbitrary for Ipv6AddrBytes {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
let mut bytes = [0; 16];
for byte in bytes.iter_mut() {
*byte = u8::arbitrary(g)
}
Self(bytes)
}
}
impl quickcheck::Arbitrary for TorrentScrapeStatistics {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
Self {
seeders: NumberOfPeers(i32::arbitrary(g)),
completed: NumberOfDownloads(i32::arbitrary(g)),
leechers: NumberOfPeers(i32::arbitrary(g)),
seeders: NumberOfPeers(I32::new(i32::arbitrary(g))),
completed: NumberOfDownloads(I32::new(i32::arbitrary(g))),
leechers: NumberOfPeers(I32::new(i32::arbitrary(g))),
}
}
}
@ -284,8 +264,8 @@ mod tests {
impl quickcheck::Arbitrary for ConnectResponse {
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
Self {
connection_id: ConnectionId(i64::arbitrary(g)),
transaction_id: TransactionId(i32::arbitrary(g)),
connection_id: ConnectionId(I64::new(i64::arbitrary(g))),
transaction_id: TransactionId(I32::new(i32::arbitrary(g))),
}
}
}
@ -297,10 +277,12 @@ mod tests {
.collect();
Self {
transaction_id: TransactionId(i32::arbitrary(g)),
announce_interval: AnnounceInterval(i32::arbitrary(g)),
leechers: NumberOfPeers(i32::arbitrary(g)),
seeders: NumberOfPeers(i32::arbitrary(g)),
fixed: AnnounceResponseFixedData {
transaction_id: TransactionId(I32::new(i32::arbitrary(g))),
announce_interval: AnnounceInterval(I32::new(i32::arbitrary(g))),
leechers: NumberOfPeers(I32::new(i32::arbitrary(g))),
seeders: NumberOfPeers(I32::new(i32::arbitrary(g))),
},
peers,
}
}
@ -313,7 +295,7 @@ mod tests {
.collect();
Self {
transaction_id: TransactionId(i32::arbitrary(g)),
transaction_id: TransactionId(I32::new(i32::arbitrary(g))),
torrent_stats,
}
}
@ -328,7 +310,7 @@ mod tests {
let success = response == r2;
if !success {
println!("before: {:#?}\nafter: {:#?}", response, r2);
::pretty_assertions::assert_eq!(response, r2);
}
success
@ -340,12 +322,16 @@ mod tests {
}
#[quickcheck]
fn test_announce_response_ipv4_convert_identity(response: AnnounceResponse<Ipv4Addr>) -> bool {
fn test_announce_response_ipv4_convert_identity(
response: AnnounceResponse<Ipv4AddrBytes>,
) -> bool {
same_after_conversion(response.into(), true)
}
#[quickcheck]
fn test_announce_response_ipv6_convert_identity(response: AnnounceResponse<Ipv6Addr>) -> bool {
fn test_announce_response_ipv6_convert_identity(
response: AnnounceResponse<Ipv6AddrBytes>,
) -> bool {
same_after_conversion(response.into(), false)
}

18
docker/ci.Dockerfile Normal file
View file

@ -0,0 +1,18 @@
# Can be used to run file transfer CI test locally. Usage:
# 1. docker build -t aquatic -f ./docker/ci.Dockerfile .
# 2. docker run aquatic
# 3. On failure, run `docker rmi aquatic -f` and go back to step 1
FROM rust:bullseye
# The entrypoint script expects the checkout under $GITHUB_WORKSPACE, as in
# the GitHub Actions environment. Use the non-deprecated key=value ENV form.
ENV GITHUB_WORKSPACE=/opt/aquatic
# WORKDIR creates the directory if missing; no separate `RUN mkdir` needed
WORKDIR /opt/aquatic
COPY ./.github/actions/test-file-transfers/entrypoint.sh entrypoint.sh
# Copy workspace manifests and sources needed for the build
COPY Cargo.toml Cargo.lock ./
COPY crates crates
ENTRYPOINT ["./entrypoint.sh"]

View file

@ -1,5 +0,0 @@
#!/bin/sh
# Build and run aquatic_udp_bench with native-CPU RUSTFLAGS (AVX-512 disabled)
. ./scripts/env-native-cpu-without-avx-512
# Quote "$@" so forwarded arguments containing spaces stay intact
cargo run --profile "release-debug" -p aquatic_udp_bench -- "$@"

View file

@ -0,0 +1,8 @@
#!/bin/bash
# Run file transfer CI test locally
# Abort on the first failing command
set -e
docker build -t aquatic -f ./docker/ci.Dockerfile .
docker run aquatic
# Only reached on success; after a failure, remove the image manually with
# `docker rmi aquatic -f` before retrying
docker rmi aquatic -f

View file

@ -0,0 +1,5 @@
#!/bin/bash
# Build and run aquatic_udp with io_uring enabled, using native-CPU RUSTFLAGS
# (AVX-512 disabled)
. ./scripts/env-native-cpu-without-avx-512
# Quote "$@" so forwarded arguments containing spaces stay intact
cargo run --profile "release-debug" -p aquatic_udp --features "io-uring" -- "$@"

View file

@ -2,4 +2,4 @@
. ./scripts/env-native-cpu-without-avx-512
cargo run --profile "release-debug" -p aquatic_udp --features "io-uring" -- $@
cargo run --profile "release-debug" -p aquatic_udp -- $@

View file

@ -1,4 +0,0 @@
#!/bin/bash
# Build the transfer-test image and open an interactive shell inside it
docker build -t aquatic-test-transfers .github/actions/test-transfers
docker run -it aquatic-test-transfers bash