udp: add separate config flag for prometheus peer client reports

This commit is contained in:
Joakim Frostegård 2023-06-07 13:28:38 +02:00
parent 08239dff1f
commit 47b45f28d5
2 changed files with 37 additions and 23 deletions

View file

@@ -161,9 +161,10 @@ impl Default for ProtocolConfig {
pub struct StatisticsConfig {
/// Collect and print/write statistics this often (seconds)
pub interval: u64,
/// Enable extended statistics (on peers per torrent and on peer clients)
/// Enable extended statistics (on peers per torrent and on peer clients).
/// Also, see `prometheus_peer_clients`.
///
/// Will increase time taken for request handling and torrent cleaning
/// Will increase time taken for request handling and torrent cleaning.
pub extended: bool,
/// Print statistics to standard output
pub print_to_stdout: bool,
@@ -177,6 +178,14 @@ pub struct StatisticsConfig {
/// Address to run prometheus endpoint on
#[cfg(feature = "prometheus")]
pub prometheus_endpoint_address: SocketAddr,
/// Serve information on all peer clients on the prometheus endpoint.
/// Requires extended statistics to be activated.
///
/// NOT RECOMMENDED. May consume lots of CPU and RAM since data on every
/// single peer client will be kept around by the endpoint, even those
/// which are no longer in the swarm.
#[cfg(feature = "prometheus")]
pub prometheus_peer_clients: bool,
}
impl StatisticsConfig {
@@ -206,6 +215,8 @@ impl Default for StatisticsConfig {
run_prometheus_endpoint: false,
#[cfg(feature = "prometheus")]
prometheus_endpoint_address: SocketAddr::from(([0, 0, 0, 0], 9000)),
#[cfg(feature = "prometheus")]
prometheus_peer_clients: false,
}
}
}

View file

@@ -81,39 +81,42 @@ pub fn run_statistics_worker(
StatisticsMessage::Ipv4PeerHistogram(h) => ipv4_collector.add_histogram(&config, h),
StatisticsMessage::Ipv6PeerHistogram(h) => ipv6_collector.add_histogram(&config, h),
StatisticsMessage::PeerAdded(peer_id) => {
let client = peer_id.client();
let first_8_bytes_hex = peer_id.first_8_bytes_hex();
let peer_client = peer_id.client();
#[cfg(feature = "prometheus")]
if config.statistics.run_prometheus_endpoint {
if config.statistics.run_prometheus_endpoint
&& config.statistics.prometheus_peer_clients
{
::metrics::increment_gauge!(
"aquatic_peer_clients",
1.0,
"client" => client.to_string(),
"peer_id_prefix_hex" => first_8_bytes_hex.to_string(),
);
}
*peer_clients.entry(client).or_insert(0) += 1;
}
StatisticsMessage::PeerRemoved(peer_id) => {
let client = peer_id.client();
#[cfg(feature = "prometheus")]
if config.statistics.run_prometheus_endpoint {
::metrics::decrement_gauge!(
"aquatic_peer_clients",
1.0,
"client" => client.to_string(),
"client" => peer_client.to_string(),
"peer_id_prefix_hex" => peer_id.first_8_bytes_hex().to_string(),
);
}
if let Some(count) = peer_clients.get_mut(&client) {
*peer_clients.entry(peer_client).or_insert(0) += 1;
}
StatisticsMessage::PeerRemoved(peer_id) => {
let peer_client = peer_id.client();
#[cfg(feature = "prometheus")]
if config.statistics.run_prometheus_endpoint
&& config.statistics.prometheus_peer_clients
{
::metrics::decrement_gauge!(
"aquatic_peer_clients",
1.0,
"client" => peer_client.to_string(),
"peer_id_prefix_hex" => peer_id.first_8_bytes_hex().to_string(),
);
}
if let Some(count) = peer_clients.get_mut(&peer_client) {
if *count == 1 {
drop(count);
peer_clients.remove(&client);
peer_clients.remove(&peer_client);
} else {
*count -= 1;
}