Merge pull request #115 from greatest-ape/prometheus

ws: add prometheus support
This commit is contained in:
Joakim Frostegård 2023-01-17 22:03:19 +01:00 committed by GitHub
commit 3cfe044297
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
9 changed files with 308 additions and 35 deletions

View file

@@ -29,7 +29,7 @@ jobs:
run: | run: |
cargo build --verbose -p aquatic_udp --features "cpu-pinning" cargo build --verbose -p aquatic_udp --features "cpu-pinning"
cargo build --verbose -p aquatic_http cargo build --verbose -p aquatic_http
cargo build --verbose -p aquatic_ws cargo build --verbose -p aquatic_ws --features "prometheus"
build-macos: build-macos:
runs-on: macos-latest runs-on: macos-latest

117
Cargo.lock generated
View file

@@ -309,6 +309,8 @@ dependencies = [
"hashbrown 0.13.1", "hashbrown 0.13.1",
"httparse", "httparse",
"log", "log",
"metrics",
"metrics-exporter-prometheus",
"mimalloc", "mimalloc",
"privdrop", "privdrop",
"quickcheck", "quickcheck",
@@ -1168,7 +1170,7 @@ dependencies = [
"cfg-if", "cfg-if",
"js-sys", "js-sys",
"libc", "libc",
"wasi", "wasi 0.11.0+wasi-snapshot-preview1",
"wasm-bindgen", "wasm-bindgen",
] ]
@@ -1230,7 +1232,7 @@ dependencies = [
"scoped-tls", "scoped-tls",
"scopeguard", "scopeguard",
"signal-hook", "signal-hook",
"sketches-ddsketch", "sketches-ddsketch 0.1.3",
"smallvec", "smallvec",
"socket2 0.3.19", "socket2 0.3.19",
"tracing", "tracing",
@@ -1507,6 +1509,12 @@ dependencies = [
"memoffset 0.5.6", "memoffset 0.5.6",
] ]
[[package]]
name = "ipnet"
version = "2.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146"
[[package]] [[package]]
name = "itertools" name = "itertools"
version = "0.10.5" version = "0.10.5"
@@ -1606,6 +1614,15 @@ dependencies = [
"cfg-if", "cfg-if",
] ]
[[package]]
name = "mach"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa"
dependencies = [
"libc",
]
[[package]] [[package]]
name = "matchit" name = "matchit"
version = "0.5.0" version = "0.5.0"
@@ -1656,6 +1673,63 @@ dependencies = [
"autocfg", "autocfg",
] ]
[[package]]
name = "metrics"
version = "0.20.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b9b8653cec6897f73b519a43fba5ee3d50f62fe9af80b428accdcc093b4a849"
dependencies = [
"ahash 0.7.6",
"metrics-macros",
"portable-atomic",
]
[[package]]
name = "metrics-exporter-prometheus"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8603921e1f54ef386189335f288441af761e0fc61bcb552168d9cedfe63ebc70"
dependencies = [
"hyper",
"indexmap",
"ipnet",
"metrics",
"metrics-util",
"parking_lot 0.12.1",
"portable-atomic",
"quanta",
"thiserror",
"tokio",
]
[[package]]
name = "metrics-macros"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "731f8ecebd9f3a4aa847dfe75455e4757a45da40a7793d2f0b1f9b6ed18b23f3"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "metrics-util"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7d24dc2dbae22bff6f1f9326ffce828c9f07ef9cc1e8002e5279f845432a30a"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
"hashbrown 0.12.3",
"metrics",
"num_cpus",
"parking_lot 0.12.1",
"portable-atomic",
"quanta",
"sketches-ddsketch 0.2.0",
]
[[package]] [[package]]
name = "mimalloc" name = "mimalloc"
version = "0.1.34" version = "0.1.34"
@@ -1694,7 +1768,7 @@ checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de"
dependencies = [ dependencies = [
"libc", "libc",
"log", "log",
"wasi", "wasi 0.11.0+wasi-snapshot-preview1",
"windows-sys", "windows-sys",
] ]
@@ -2100,6 +2174,22 @@ dependencies = [
"unicode-ident", "unicode-ident",
] ]
[[package]]
name = "quanta"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b7e31331286705f455e56cca62e0e717158474ff02b7936c1fa596d983f4ae27"
dependencies = [
"crossbeam-utils",
"libc",
"mach",
"once_cell",
"raw-cpuid",
"wasi 0.10.2+wasi-snapshot-preview1",
"web-sys",
"winapi 0.3.9",
]
[[package]] [[package]]
name = "quickcheck" name = "quickcheck"
version = "1.0.3" version = "1.0.3"
@@ -2171,6 +2261,15 @@ dependencies = [
"rand", "rand",
] ]
[[package]]
name = "raw-cpuid"
version = "10.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a6823ea29436221176fe662da99998ad3b4db2c7f31e7b6f5fe43adccd6320bb"
dependencies = [
"bitflags 1.3.2",
]
[[package]] [[package]]
name = "rayon" name = "rayon"
version = "1.6.1" version = "1.6.1"
@@ -2478,6 +2577,12 @@ version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04d2ecae5fcf33b122e2e6bd520a57ccf152d2dde3b38c71039df1a6867264ee" checksum = "04d2ecae5fcf33b122e2e6bd520a57ccf152d2dde3b38c71039df1a6867264ee"
[[package]]
name = "sketches-ddsketch"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ceb945e54128e09c43d8e4f1277851bd5044c6fc540bbaa2ad888f60b3da9ae7"
[[package]] [[package]]
name = "slab" name = "slab"
version = "0.4.7" version = "0.4.7"
@@ -3080,6 +3185,12 @@ dependencies = [
"try-lock", "try-lock",
] ]
[[package]]
name = "wasi"
version = "0.10.2+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
[[package]] [[package]]
name = "wasi" name = "wasi"
version = "0.11.0+wasi-snapshot-preview1" version = "0.11.0+wasi-snapshot-preview1"

View file

@@ -232,6 +232,29 @@ proxied to IPv4 requests, and IPv6 requests to IPv6 requests.
More details are available [here](./documents/aquatic-ws-load-test-2022-03-29.pdf). Please note that request workers have been renamed to swarm workers. More details are available [here](./documents/aquatic-ws-load-test-2022-03-29.pdf). Please note that request workers have been renamed to swarm workers.
#### Prometheus
`aquatic_ws` supports exporting [Prometheus](https://prometheus.io/) metrics.
Enable the `prometheus` Cargo feature when building:
```sh
. ./scripts/env-native-cpu-without-avx-512
cargo build --release -p aquatic_ws --features "prometheus"
```
Then activate the Prometheus endpoint in the configuration file:
```toml
[metrics]
# Run a prometheus endpoint
run_prometheus_endpoint = true
# Address to run prometheus endpoint on
prometheus_endpoint_address = "0.0.0.0:9000"
# Update metrics for torrent count this often (seconds)
torrent_count_update_interval = 10
```
## Load testing ## Load testing
There are load test binaries for all protocols. They use a CLI structure There are load test binaries for all protocols. They use a CLI structure

View file

@@ -16,6 +16,10 @@ name = "aquatic_ws"
[[bin]] [[bin]]
name = "aquatic_ws" name = "aquatic_ws"
[features]
prometheus = ["metrics", "metrics-exporter-prometheus"]
metrics = ["dep:metrics"]
[dependencies] [dependencies]
aquatic_common = { workspace = true, features = ["rustls", "glommio"] } aquatic_common = { workspace = true, features = ["rustls", "glommio"] }
aquatic_toml_config.workspace = true aquatic_toml_config.workspace = true
@@ -31,6 +35,8 @@ glommio = "0.7"
hashbrown = { version = "0.13", features = ["serde"] } hashbrown = { version = "0.13", features = ["serde"] }
httparse = "1" httparse = "1"
log = "0.4" log = "0.4"
metrics = { version = "0.20", optional = true }
metrics-exporter-prometheus = { version = "0.11", optional = true, default-features = false, features = ["http-listener"] }
mimalloc = { version = "0.1", default-features = false } mimalloc = { version = "0.1", default-features = false }
privdrop = "0.5" privdrop = "0.5"
rand = { version = "0.8", features = ["small_rng"] } rand = { version = "0.8", features = ["small_rng"] }

View file

@@ -28,6 +28,8 @@ pub struct Config {
pub cleaning: CleaningConfig, pub cleaning: CleaningConfig,
pub privileges: PrivilegeConfig, pub privileges: PrivilegeConfig,
pub access_list: AccessListConfig, pub access_list: AccessListConfig,
#[cfg(feature = "metrics")]
pub metrics: MetricsConfig,
pub cpu_pinning: CpuPinningConfigAsc, pub cpu_pinning: CpuPinningConfigAsc,
} }
@@ -42,6 +44,8 @@ impl Default for Config {
cleaning: CleaningConfig::default(), cleaning: CleaningConfig::default(),
privileges: PrivilegeConfig::default(), privileges: PrivilegeConfig::default(),
access_list: AccessListConfig::default(), access_list: AccessListConfig::default(),
#[cfg(feature = "metrics")]
metrics: Default::default(),
cpu_pinning: Default::default(), cpu_pinning: Default::default(),
} }
} }
@@ -142,6 +146,29 @@ impl Default for CleaningConfig {
} }
} }
#[cfg(feature = "metrics")]
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct MetricsConfig {
/// Run a prometheus endpoint
pub run_prometheus_endpoint: bool,
/// Address to run prometheus endpoint on
pub prometheus_endpoint_address: SocketAddr,
/// Update metrics for torrent count this often (seconds)
pub torrent_count_update_interval: u64,
}
#[cfg(feature = "metrics")]
impl Default for MetricsConfig {
fn default() -> Self {
Self {
run_prometheus_endpoint: false,
prometheus_endpoint_address: SocketAddr::from(([0, 0, 0, 0], 9000)),
torrent_count_update_interval: 10,
}
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::Config; use super::Config;

View file

@@ -35,6 +35,21 @@ pub fn run(config: Config) -> ::anyhow::Result<()> {
let mut signals = Signals::new([SIGUSR1, SIGTERM])?; let mut signals = Signals::new([SIGUSR1, SIGTERM])?;
#[cfg(feature = "prometheus")]
if config.metrics.run_prometheus_endpoint {
use metrics_exporter_prometheus::PrometheusBuilder;
PrometheusBuilder::new()
.with_http_listener(config.metrics.prometheus_endpoint_address)
.install()
.with_context(|| {
format!(
"Install prometheus endpoint on {}",
config.metrics.prometheus_endpoint_address
)
})?;
}
let state = State::default(); let state = State::default();
update_access_list(&config.access_list, &state.access_list)?; update_access_list(&config.access_list, &state.access_list)?;

View file

@@ -152,6 +152,13 @@ pub async fn run_socket_worker(
::log::trace!("accepting stream, assigning id {}", key); ::log::trace!("accepting stream, assigning id {}", key);
let task_handle = spawn_local_into(enclose!((config, access_list, control_message_senders, in_message_senders, connection_slab, opt_tls_config) async move { let task_handle = spawn_local_into(enclose!((config, access_list, control_message_senders, in_message_senders, connection_slab, opt_tls_config) async move {
#[cfg(feature = "metrics")]
::metrics::increment_gauge!(
"aquatic_active_connections",
1.0,
"ip_version" => ip_version_to_metrics_str(ip_version)
);
if let Err(err) = run_connection( if let Err(err) = run_connection(
config.clone(), config.clone(),
access_list, access_list,
@@ -173,6 +180,13 @@ pub async fn run_socket_worker(
// Clean up after closed connection // Clean up after closed connection
#[cfg(feature = "metrics")]
::metrics::decrement_gauge!(
"aquatic_active_connections",
1.0,
"ip_version" => ip_version_to_metrics_str(ip_version)
);
// Remove reference in separate statement to avoid // Remove reference in separate statement to avoid
// multiple RefCell borrows // multiple RefCell borrows
let opt_reference = connection_slab.borrow_mut().try_remove(key); let opt_reference = connection_slab.borrow_mut().try_remove(key);
@@ -419,6 +433,7 @@ async fn run_stream_agnostic_connection<
pending_scrape_slab, pending_scrape_slab,
connection_id, connection_id,
server_start_instant, server_start_instant,
ip_version,
}; };
let result = writer.run_out_message_loop().await; let result = writer.run_out_message_loop().await;
@@ -501,6 +516,13 @@ impl<S: futures::AsyncRead + futures::AsyncWrite + Unpin> ConnectionReader<S> {
async fn handle_in_message(&mut self, in_message: InMessage) -> anyhow::Result<()> { async fn handle_in_message(&mut self, in_message: InMessage) -> anyhow::Result<()> {
match in_message { match in_message {
InMessage::AnnounceRequest(announce_request) => { InMessage::AnnounceRequest(announce_request) => {
#[cfg(feature = "metrics")]
::metrics::increment_counter!(
"aquatic_requests_total",
"type" => "announce",
"ip_version" => ip_version_to_metrics_str(self.ip_version)
);
let info_hash = announce_request.info_hash; let info_hash = announce_request.info_hash;
if self if self
@@ -571,6 +593,13 @@ impl<S: futures::AsyncRead + futures::AsyncWrite + Unpin> ConnectionReader<S> {
} }
} }
InMessage::ScrapeRequest(ScrapeRequest { info_hashes, .. }) => { InMessage::ScrapeRequest(ScrapeRequest { info_hashes, .. }) => {
#[cfg(feature = "metrics")]
::metrics::increment_counter!(
"aquatic_requests_total",
"type" => "scrape",
"ip_version" => ip_version_to_metrics_str(self.ip_version)
);
let info_hashes = if let Some(info_hashes) = info_hashes { let info_hashes = if let Some(info_hashes) = info_hashes {
info_hashes info_hashes
} else { } else {
@@ -642,10 +671,22 @@ impl<S: futures::AsyncRead + futures::AsyncWrite + Unpin> ConnectionReader<S> {
info_hash, info_hash,
}); });
self.out_message_sender let result = self
.out_message_sender
.send((self.make_connection_meta(None).into(), out_message)) .send((self.make_connection_meta(None).into(), out_message))
.await .await
.map_err(|err| anyhow::anyhow!("ConnectionReader::send_error_response failed: {}", err)) .map_err(|err| {
anyhow::anyhow!("ConnectionReader::send_error_response failed: {}", err)
});
#[cfg(feature = "metrics")]
::metrics::increment_counter!(
"aquatic_requests_total",
"type" => "error",
"ip_version" => ip_version_to_metrics_str(self.ip_version)
);
result
} }
fn make_connection_meta(&self, pending_scrape_id: Option<PendingScrapeId>) -> InMessageMeta { fn make_connection_meta(&self, pending_scrape_id: Option<PendingScrapeId>) -> InMessageMeta {
@@ -666,6 +707,7 @@ struct ConnectionWriter<S> {
pending_scrape_slab: Rc<RefCell<Slab<PendingScrapeResponse>>>, pending_scrape_slab: Rc<RefCell<Slab<PendingScrapeResponse>>>,
server_start_instant: ServerStartInstant, server_start_instant: ServerStartInstant,
connection_id: ConnectionId, connection_id: ConnectionId,
ip_version: IpVersion,
} }
impl<S: futures::AsyncRead + futures::AsyncWrite + Unpin> ConnectionWriter<S> { impl<S: futures::AsyncRead + futures::AsyncWrite + Unpin> ConnectionWriter<S> {
@@ -728,6 +770,23 @@ impl<S: futures::AsyncRead + futures::AsyncWrite + Unpin> ConnectionWriter<S> {
match result { match result {
Ok(Ok(())) => { Ok(Ok(())) => {
#[cfg(feature = "metrics")]
{
let out_message_type = match &out_message {
OutMessage::Offer(_) => "offer",
OutMessage::Answer(_) => "offer_answer",
OutMessage::AnnounceResponse(_) => "announce",
OutMessage::ScrapeResponse(_) => "scrape",
OutMessage::ErrorResponse(_) => "error",
};
::metrics::increment_counter!(
"aquatic_responses_total",
"type" => out_message_type,
"ip_version" => ip_version_to_metrics_str(self.ip_version),
);
}
self.connection_slab self.connection_slab
.borrow_mut() .borrow_mut()
.get_mut(self.connection_id.0) .get_mut(self.connection_id.0)
@@ -807,3 +866,11 @@ fn create_tcp_listener(
Ok(unsafe { TcpListener::from_raw_fd(socket.into_raw_fd()) }) Ok(unsafe { TcpListener::from_raw_fd(socket.into_raw_fd()) })
} }
#[cfg(feature = "metrics")]
fn ip_version_to_metrics_str(ip_version: IpVersion) -> &'static str {
match ip_version {
IpVersion::V4 => "4",
IpVersion::V6 => "6",
}
}

View file

@@ -58,7 +58,6 @@ type PeerMap = IndexMap<PeerId, Peer>;
struct TorrentData { struct TorrentData {
pub peers: PeerMap, pub peers: PeerMap,
pub num_seeders: usize, pub num_seeders: usize,
pub num_leechers: usize,
} }
impl Default for TorrentData { impl Default for TorrentData {
@@ -67,7 +66,6 @@ impl Default for TorrentData {
Self { Self {
peers: Default::default(), peers: Default::default(),
num_seeders: 0, num_seeders: 0,
num_leechers: 0,
} }
} }
} }
@@ -77,11 +75,13 @@ impl TorrentData {
if let Some(peer) = self.peers.remove(&peer_id) { if let Some(peer) = self.peers.remove(&peer_id) {
if peer.seeder { if peer.seeder {
self.num_seeders -= 1; self.num_seeders -= 1;
} else {
self.num_leechers -= 1;
} }
} }
} }
pub fn num_leechers(&self) -> usize {
self.peers.len() - self.num_seeders
}
} }
type TorrentMap = AmortizedIndexMap<InfoHash, TorrentData>; type TorrentMap = AmortizedIndexMap<InfoHash, TorrentData>;
@@ -102,8 +102,8 @@ impl TorrentMaps {
let mut access_list_cache = create_access_list_cache(access_list); let mut access_list_cache = create_access_list_cache(access_list);
let now = server_start_instant.seconds_elapsed(); let now = server_start_instant.seconds_elapsed();
Self::clean_torrent_map(config, &mut access_list_cache, &mut self.ipv4, now); Self::clean_torrent_map(config, &mut access_list_cache, &mut self.ipv4, now, "4");
Self::clean_torrent_map(config, &mut access_list_cache, &mut self.ipv6, now); Self::clean_torrent_map(config, &mut access_list_cache, &mut self.ipv6, now, "6");
} }
fn clean_torrent_map( fn clean_torrent_map(
@ -111,7 +111,10 @@ impl TorrentMaps {
access_list_cache: &mut AccessListCache, access_list_cache: &mut AccessListCache,
torrent_map: &mut TorrentMap, torrent_map: &mut TorrentMap,
now: SecondsSinceServerStart, now: SecondsSinceServerStart,
ip_version: &'static str,
) { ) {
let mut total_num_peers = 0u64;
torrent_map.retain(|info_hash, torrent_data| { torrent_map.retain(|info_hash, torrent_data| {
if !access_list_cache if !access_list_cache
.load() .load()
@ -121,26 +124,28 @@ impl TorrentMaps {
} }
let num_seeders = &mut torrent_data.num_seeders; let num_seeders = &mut torrent_data.num_seeders;
let num_leechers = &mut torrent_data.num_leechers;
torrent_data.peers.retain(|_, peer| { torrent_data.peers.retain(|_, peer| {
let keep = peer.valid_until.valid(now); let keep = peer.valid_until.valid(now);
if !keep { if (!keep) & peer.seeder {
if peer.seeder { *num_seeders -= 1;
*num_seeders -= 1;
} else {
*num_leechers -= 1;
}
} }
keep keep
}); });
total_num_peers += torrent_data.peers.len() as u64;
!torrent_data.peers.is_empty() !torrent_data.peers.is_empty()
}); });
torrent_map.shrink_to_fit(); torrent_map.shrink_to_fit();
let total_num_peers = total_num_peers as f64;
#[cfg(feature = "metrics")]
::metrics::gauge!("aquatic_peers", total_num_peers, "ip_version" => ip_version);
} }
} }
@ -175,6 +180,27 @@ pub async fn run_swarm_worker(
})() })()
})); }));
// Periodically update torrent count metrics
#[cfg(feature = "metrics")]
TimerActionRepeat::repeat(enclose!((config, torrents) move || {
enclose!((config, torrents) move || async move {
let torrents = torrents.borrow_mut();
::metrics::gauge!(
"aquatic_torrents",
torrents.ipv4.len() as f64,
"ip_version" => "4"
);
::metrics::gauge!(
"aquatic_torrents",
torrents.ipv6.len() as f64,
"ip_version" => "6"
);
Some(Duration::from_secs(config.metrics.torrent_count_update_interval))
})()
}));
let mut handles = Vec::new(); let mut handles = Vec::new();
for (_, receiver) in control_message_receivers.streams() { for (_, receiver) in control_message_receivers.streams() {
@ -307,11 +333,12 @@ fn handle_announce_request(
request_sender_meta: InMessageMeta, request_sender_meta: InMessageMeta,
request: AnnounceRequest, request: AnnounceRequest,
) { ) {
let torrent_data: &mut TorrentData = if let IpVersion::V4 = request_sender_meta.ip_version { let (torrent_data, ip_version): (&mut TorrentData, &'static str) =
torrent_maps.ipv4.entry(request.info_hash).or_default() if let IpVersion::V4 = request_sender_meta.ip_version {
} else { (torrent_maps.ipv4.entry(request.info_hash).or_default(), "4")
torrent_maps.ipv6.entry(request.info_hash).or_default() } else {
}; (torrent_maps.ipv6.entry(request.info_hash).or_default(), "6")
};
// If there is already a peer with this peer_id, check that connection id // If there is already a peer with this peer_id, check that connection id
// is same as that of request sender. Otherwise, ignore request. Since // is same as that of request sender. Otherwise, ignore request. Since
@ -334,8 +361,6 @@ fn handle_announce_request(
let opt_removed_peer = match peer_status { let opt_removed_peer = match peer_status {
PeerStatus::Leeching => { PeerStatus::Leeching => {
torrent_data.num_leechers += 1;
let peer = Peer { let peer = Peer {
connection_id: request_sender_meta.connection_id, connection_id: request_sender_meta.connection_id,
consumer_id: request_sender_meta.out_message_consumer_id, consumer_id: request_sender_meta.out_message_consumer_id,
@ -360,14 +385,13 @@ fn handle_announce_request(
PeerStatus::Stopped => torrent_data.peers.remove(&request.peer_id), PeerStatus::Stopped => torrent_data.peers.remove(&request.peer_id),
}; };
match opt_removed_peer.map(|peer| peer.seeder) { if let Some(removed_peer) = opt_removed_peer {
Some(false) => { if removed_peer.seeder {
torrent_data.num_leechers -= 1;
}
Some(true) => {
torrent_data.num_seeders -= 1; torrent_data.num_seeders -= 1;
} }
_ => {} } else {
#[cfg(feature = "metrics")]
::metrics::increment_gauge!("aquatic_peers", 1.0, "ip_version" => ip_version);
} }
} }
@ -437,7 +461,7 @@ fn handle_announce_request(
action: AnnounceAction, action: AnnounceAction,
info_hash: request.info_hash, info_hash: request.info_hash,
complete: torrent_data.num_seeders, complete: torrent_data.num_seeders,
incomplete: torrent_data.num_leechers, incomplete: torrent_data.num_leechers(),
announce_interval: config.protocol.peer_announce_interval, announce_interval: config.protocol.peer_announce_interval,
}); });
@ -475,7 +499,7 @@ fn handle_scrape_request(
let stats = ScrapeStatistics { let stats = ScrapeStatistics {
complete: torrent_data.num_seeders, complete: torrent_data.num_seeders,
downloaded: 0, // No implementation planned downloaded: 0, // No implementation planned
incomplete: torrent_data.num_leechers, incomplete: torrent_data.num_leechers(),
}; };
out_message.files.insert(info_hash, stats); out_message.files.insert(info_hash, stats);

View file

@@ -2,4 +2,4 @@
. ./scripts/env-native-cpu-without-avx-512 . ./scripts/env-native-cpu-without-avx-512
cargo run --profile "release-debug" -p aquatic_ws -- $@ cargo run --profile "release-debug" -p aquatic_ws --features "prometheus" -- $@