mirror of
https://github.com/YGGverse/aquatic.git
synced 2026-04-01 02:05:30 +00:00
Move all crates to new crates dir
This commit is contained in:
parent
3835da22ac
commit
9b032f7e24
128 changed files with 27 additions and 26 deletions
318
crates/udp/src/workers/statistics/collector.rs
Normal file
318
crates/udp/src/workers/statistics/collector.rs
Normal file
|
|
@ -0,0 +1,318 @@
|
|||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
use hdrhistogram::Histogram;
|
||||
use num_format::{Locale, ToFormattedString};
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::common::Statistics;
|
||||
use crate::config::Config;
|
||||
|
||||
/// Collects and summarizes statistics for a single IP version (IPv4 or IPv6).
pub struct StatisticsCollector {
    /// Counters shared with (and incremented by) the other workers
    shared: Arc<Statistics>,
    /// Time of the previous collection; used to compute per-second rates
    last_update: Instant,
    /// Per-swarm-worker peer histograms not yet merged into a complete one
    pending_histograms: Vec<Histogram<u64>>,
    /// Percentile summary of the most recently completed merged histogram
    last_complete_histogram: PeerHistogramStatistics,
    /// Label value attached to exported Prometheus metrics ("4" or "6")
    #[cfg(feature = "prometheus")]
    ip_version: String,
}
|
||||
|
||||
impl StatisticsCollector {
    /// Create a collector reading from the shared statistics of one IP version.
    ///
    /// `ip_version` (Prometheus builds only) is the label value attached to
    /// all metrics exported by this collector.
    pub fn new(shared: Arc<Statistics>, #[cfg(feature = "prometheus")] ip_version: String) -> Self {
        Self {
            shared,
            last_update: Instant::now(),
            pending_histograms: Vec::new(),
            last_complete_histogram: Default::default(),
            #[cfg(feature = "prometheus")]
            ip_version,
        }
    }

    /// Buffer a peers-per-torrent histogram received from one swarm worker.
    ///
    /// Once exactly one histogram per swarm worker has been buffered, merge
    /// them all and store the percentile summary as the latest complete
    /// histogram.
    // NOTE(review): relies on each swarm worker sending exactly one histogram
    // per reporting cycle; if counts ever drift past `swarm_workers` the
    // equality check would never trigger — confirm against senders.
    pub fn add_histogram(&mut self, config: &Config, histogram: Histogram<u64>) {
        self.pending_histograms.push(histogram);

        if self.pending_histograms.len() == config.swarm_workers {
            self.last_complete_histogram =
                PeerHistogramStatistics::new(self.pending_histograms.drain(..).sum());
        }
    }

    /// Snapshot all shared counters (resetting them to zero), derive
    /// per-second rates from the time elapsed since the previous collection,
    /// and return the values formatted for display. On Prometheus builds,
    /// additionally export the raw values as metrics when the endpoint is
    /// enabled.
    pub fn collect_from_shared(
        &mut self,
        #[cfg(feature = "prometheus")] config: &Config,
    ) -> CollectedStatistics {
        // Atomically read-and-reset counters updated by other workers
        let requests_received = Self::fetch_and_reset(&self.shared.requests_received);
        let responses_sent_connect = Self::fetch_and_reset(&self.shared.responses_sent_connect);
        let responses_sent_announce = Self::fetch_and_reset(&self.shared.responses_sent_announce);
        let responses_sent_scrape = Self::fetch_and_reset(&self.shared.responses_sent_scrape);
        let responses_sent_error = Self::fetch_and_reset(&self.shared.responses_sent_error);

        let bytes_received = Self::fetch_and_reset(&self.shared.bytes_received);
        let bytes_sent = Self::fetch_and_reset(&self.shared.bytes_sent);

        // Torrent/peer counts are gauges, not counters: read without resetting
        let num_torrents_by_worker: Vec<usize> = self
            .shared
            .torrents
            .iter()
            .map(|n| n.load(Ordering::Relaxed))
            .collect();
        let num_peers_by_worker: Vec<usize> = self
            .shared
            .peers
            .iter()
            .map(|n| n.load(Ordering::Relaxed))
            .collect();

        // Seconds since the previous collection, for rate computation
        let elapsed = {
            let now = Instant::now();

            let elapsed = (now - self.last_update).as_secs_f64();

            self.last_update = now;

            elapsed
        };

        #[cfg(feature = "prometheus")]
        if config.statistics.run_prometheus_endpoint {
            // Export raw (cumulative) values; Prometheus derives rates itself
            ::metrics::counter!(
                "aquatic_requests_total",
                requests_received.try_into().unwrap(),
                "ip_version" => self.ip_version.clone(),
            );
            ::metrics::counter!(
                "aquatic_responses_total",
                responses_sent_connect.try_into().unwrap(),
                "type" => "connect",
                "ip_version" => self.ip_version.clone(),
            );
            ::metrics::counter!(
                "aquatic_responses_total",
                responses_sent_announce.try_into().unwrap(),
                "type" => "announce",
                "ip_version" => self.ip_version.clone(),
            );
            ::metrics::counter!(
                "aquatic_responses_total",
                responses_sent_scrape.try_into().unwrap(),
                "type" => "scrape",
                "ip_version" => self.ip_version.clone(),
            );
            ::metrics::counter!(
                "aquatic_responses_total",
                responses_sent_error.try_into().unwrap(),
                "type" => "error",
                "ip_version" => self.ip_version.clone(),
            );
            ::metrics::counter!(
                "aquatic_rx_bytes",
                bytes_received.try_into().unwrap(),
                "ip_version" => self.ip_version.clone(),
            );
            ::metrics::counter!(
                "aquatic_tx_bytes",
                bytes_sent.try_into().unwrap(),
                "ip_version" => self.ip_version.clone(),
            );

            // One gauge per swarm worker, distinguished by worker_index label
            for (worker_index, n) in num_torrents_by_worker.iter().copied().enumerate() {
                ::metrics::gauge!(
                    "aquatic_torrents",
                    n as f64,
                    "ip_version" => self.ip_version.clone(),
                    "worker_index" => worker_index.to_string(),
                );
            }
            for (worker_index, n) in num_peers_by_worker.iter().copied().enumerate() {
                ::metrics::gauge!(
                    "aquatic_peers",
                    n as f64,
                    "ip_version" => self.ip_version.clone(),
                    "worker_index" => worker_index.to_string(),
                );
            }

            if config.statistics.torrent_peer_histograms {
                self.last_complete_histogram
                    .update_metrics(self.ip_version.clone());
            }
        }

        let num_peers: usize = num_peers_by_worker.into_iter().sum();
        let num_torrents: usize = num_torrents_by_worker.into_iter().sum();

        let requests_per_second = requests_received as f64 / elapsed;
        let responses_per_second_connect = responses_sent_connect as f64 / elapsed;
        let responses_per_second_announce = responses_sent_announce as f64 / elapsed;
        let responses_per_second_scrape = responses_sent_scrape as f64 / elapsed;
        let responses_per_second_error = responses_sent_error as f64 / elapsed;
        let bytes_received_per_second = bytes_received as f64 / elapsed;
        let bytes_sent_per_second = bytes_sent as f64 / elapsed;

        let responses_per_second_total = responses_per_second_connect
            + responses_per_second_announce
            + responses_per_second_scrape
            + responses_per_second_error;

        // All values are pre-formatted as strings for display
        CollectedStatistics {
            requests_per_second: (requests_per_second as usize).to_formatted_string(&Locale::en),
            responses_per_second_total: (responses_per_second_total as usize)
                .to_formatted_string(&Locale::en),
            responses_per_second_connect: (responses_per_second_connect as usize)
                .to_formatted_string(&Locale::en),
            responses_per_second_announce: (responses_per_second_announce as usize)
                .to_formatted_string(&Locale::en),
            responses_per_second_scrape: (responses_per_second_scrape as usize)
                .to_formatted_string(&Locale::en),
            responses_per_second_error: (responses_per_second_error as usize)
                .to_formatted_string(&Locale::en),
            rx_mbits: format!("{:.2}", bytes_received_per_second * 8.0 / 1_000_000.0),
            tx_mbits: format!("{:.2}", bytes_sent_per_second * 8.0 / 1_000_000.0),
            num_torrents: num_torrents.to_formatted_string(&Locale::en),
            num_peers: num_peers.to_formatted_string(&Locale::en),
            peer_histogram: self.last_complete_histogram.clone(),
        }
    }

    /// Atomically read the current value and reset the counter to zero
    /// (AND with 0 stores 0 and returns the previous value).
    fn fetch_and_reset(atomic: &AtomicUsize) -> usize {
        atomic.fetch_and(0, Ordering::Relaxed)
    }
}
|
||||
|
||||
/// One collection cycle's statistics for a single IP version, with all
/// values pre-formatted as strings for stdout / HTML-template display.
#[derive(Clone, Debug, Serialize)]
pub struct CollectedStatistics {
    pub requests_per_second: String,
    pub responses_per_second_total: String,
    pub responses_per_second_connect: String,
    pub responses_per_second_announce: String,
    pub responses_per_second_scrape: String,
    pub responses_per_second_error: String,
    /// Receive bandwidth in Mbit/s, formatted with two decimals
    pub rx_mbits: String,
    /// Transmit bandwidth in Mbit/s, formatted with two decimals
    pub tx_mbits: String,
    pub num_torrents: String,
    pub num_peers: String,
    /// Peers-per-torrent percentile summary from the latest merged histogram
    pub peer_histogram: PeerHistogramStatistics,
}
|
||||
|
||||
/// Percentile summary (peers per torrent) extracted from a merged
/// `hdrhistogram::Histogram`. All-zero by default until the first complete
/// histogram has been collected.
#[derive(Clone, Debug, Serialize, Default)]
pub struct PeerHistogramStatistics {
    pub min: u64,
    pub p10: u64,
    pub p20: u64,
    pub p30: u64,
    pub p40: u64,
    pub p50: u64,
    pub p60: u64,
    pub p70: u64,
    pub p80: u64,
    pub p90: u64,
    pub p95: u64,
    pub p99: u64,
    /// 99.9th percentile
    pub p999: u64,
    pub max: u64,
}
|
||||
|
||||
impl PeerHistogramStatistics {
|
||||
fn new(h: Histogram<u64>) -> Self {
|
||||
Self {
|
||||
min: h.min(),
|
||||
p10: h.value_at_percentile(10.0),
|
||||
p20: h.value_at_percentile(20.0),
|
||||
p30: h.value_at_percentile(30.0),
|
||||
p40: h.value_at_percentile(40.0),
|
||||
p50: h.value_at_percentile(50.0),
|
||||
p60: h.value_at_percentile(60.0),
|
||||
p70: h.value_at_percentile(70.0),
|
||||
p80: h.value_at_percentile(80.0),
|
||||
p90: h.value_at_percentile(90.0),
|
||||
p95: h.value_at_percentile(95.0),
|
||||
p99: h.value_at_percentile(99.0),
|
||||
p999: h.value_at_percentile(99.9),
|
||||
max: h.max(),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "prometheus")]
|
||||
fn update_metrics(&self, ip_version: String) {
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.min as f64,
|
||||
"type" => "max",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p10 as f64,
|
||||
"type" => "p10",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p20 as f64,
|
||||
"type" => "p20",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p30 as f64,
|
||||
"type" => "p30",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p40 as f64,
|
||||
"type" => "p40",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p50 as f64,
|
||||
"type" => "p50",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p60 as f64,
|
||||
"type" => "p60",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p70 as f64,
|
||||
"type" => "p70",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p80 as f64,
|
||||
"type" => "p80",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p90 as f64,
|
||||
"type" => "p90",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p99 as f64,
|
||||
"type" => "p99",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p999 as f64,
|
||||
"type" => "p99.9",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.max as f64,
|
||||
"type" => "max",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
}
|
||||
}
|
||||
306
crates/udp/src/workers/statistics/mod.rs
Normal file
306
crates/udp/src/workers/statistics/mod.rs
Normal file
|
|
@ -0,0 +1,306 @@
|
|||
mod collector;
|
||||
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use anyhow::Context;
|
||||
use aquatic_common::{IndexMap, PanicSentinel};
|
||||
use aquatic_udp_protocol::{PeerClient, PeerId};
|
||||
use compact_str::CompactString;
|
||||
use crossbeam_channel::Receiver;
|
||||
use num_format::{Locale, ToFormattedString};
|
||||
use serde::Serialize;
|
||||
use time::format_description::well_known::Rfc2822;
|
||||
use time::OffsetDateTime;
|
||||
use tinytemplate::TinyTemplate;
|
||||
|
||||
use collector::{CollectedStatistics, StatisticsCollector};
|
||||
|
||||
use crate::common::*;
|
||||
use crate::config::Config;
|
||||
|
||||
/// Key under which the statistics template is registered with TinyTemplate.
const TEMPLATE_KEY: &str = "statistics";
/// HTML statistics page template, embedded at compile time.
const TEMPLATE_CONTENTS: &str = include_str!("../../../templates/statistics.html");
/// Statistics page stylesheet, embedded at compile time and wrapped in a
/// `<style>` element so it can be inlined into the rendered page.
const STYLESHEET_CONTENTS: &str = concat!(
    "<style>",
    include_str!("../../../templates/statistics.css"),
    "</style>"
);
|
||||
|
||||
/// Data passed to the TinyTemplate HTML statistics template.
#[derive(Debug, Serialize)]
struct TemplateData {
    /// Inlined `<style>…</style>` block (see `STYLESHEET_CONTENTS`)
    stylesheet: String,
    ipv4_active: bool,
    ipv6_active: bool,
    /// Whether peer histogram sections should be rendered
    extended_active: bool,
    ipv4: CollectedStatistics,
    ipv6: CollectedStatistics,
    /// RFC 2822 timestamp of this collection cycle
    last_updated: String,
    /// Torrent cleaning interval in seconds (peer counts update at this rate)
    peer_update_interval: String,
    /// (client name, formatted peer count) pairs, sorted by descending count
    peer_clients: Vec<(String, String)>,
}
|
||||
|
||||
pub fn run_statistics_worker(
|
||||
_sentinel: PanicSentinel,
|
||||
config: Config,
|
||||
shared_state: State,
|
||||
statistics_receiver: Receiver<StatisticsMessage>,
|
||||
) {
|
||||
let process_peer_client_data = {
|
||||
let mut collect = config.statistics.write_html_to_file;
|
||||
|
||||
#[cfg(feature = "prometheus")]
|
||||
{
|
||||
collect |= config.statistics.run_prometheus_endpoint;
|
||||
}
|
||||
|
||||
collect & config.statistics.peer_clients
|
||||
};
|
||||
|
||||
let opt_tt = if config.statistics.write_html_to_file {
|
||||
let mut tt = TinyTemplate::new();
|
||||
|
||||
if let Err(err) = tt.add_template(TEMPLATE_KEY, TEMPLATE_CONTENTS) {
|
||||
::log::error!("Couldn't parse statistics html template: {:#}", err);
|
||||
|
||||
None
|
||||
} else {
|
||||
Some(tt)
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let mut ipv4_collector = StatisticsCollector::new(
|
||||
shared_state.statistics_ipv4,
|
||||
#[cfg(feature = "prometheus")]
|
||||
"4".into(),
|
||||
);
|
||||
let mut ipv6_collector = StatisticsCollector::new(
|
||||
shared_state.statistics_ipv6,
|
||||
#[cfg(feature = "prometheus")]
|
||||
"6".into(),
|
||||
);
|
||||
|
||||
// Store a count to enable not removing peers from the count completely
|
||||
// just because they were removed from one torrent
|
||||
let mut peers: IndexMap<PeerId, (usize, PeerClient, CompactString)> = IndexMap::default();
|
||||
|
||||
loop {
|
||||
let start_time = Instant::now();
|
||||
|
||||
for message in statistics_receiver.try_iter() {
|
||||
match message {
|
||||
StatisticsMessage::Ipv4PeerHistogram(h) => ipv4_collector.add_histogram(&config, h),
|
||||
StatisticsMessage::Ipv6PeerHistogram(h) => ipv6_collector.add_histogram(&config, h),
|
||||
StatisticsMessage::PeerAdded(peer_id) => {
|
||||
if process_peer_client_data {
|
||||
peers
|
||||
.entry(peer_id)
|
||||
.or_insert_with(|| (0, peer_id.client(), peer_id.first_8_bytes_hex()))
|
||||
.0 += 1;
|
||||
}
|
||||
}
|
||||
StatisticsMessage::PeerRemoved(peer_id) => {
|
||||
if process_peer_client_data {
|
||||
if let Some((count, _, _)) = peers.get_mut(&peer_id) {
|
||||
*count -= 1;
|
||||
|
||||
if *count == 0 {
|
||||
peers.remove(&peer_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let statistics_ipv4 = ipv4_collector.collect_from_shared(
|
||||
#[cfg(feature = "prometheus")]
|
||||
&config,
|
||||
);
|
||||
let statistics_ipv6 = ipv6_collector.collect_from_shared(
|
||||
#[cfg(feature = "prometheus")]
|
||||
&config,
|
||||
);
|
||||
|
||||
let peer_clients = if process_peer_client_data {
|
||||
let mut clients: IndexMap<PeerClient, usize> = IndexMap::default();
|
||||
|
||||
#[cfg(feature = "prometheus")]
|
||||
let mut prefixes: IndexMap<CompactString, usize> = IndexMap::default();
|
||||
|
||||
// Only count peer_ids once, even if they are in multiple torrents
|
||||
for (_, peer_client, prefix) in peers.values() {
|
||||
*clients.entry(peer_client.to_owned()).or_insert(0) += 1;
|
||||
|
||||
#[cfg(feature = "prometheus")]
|
||||
if config.statistics.run_prometheus_endpoint
|
||||
&& config.statistics.prometheus_peer_id_prefixes
|
||||
{
|
||||
*prefixes.entry(prefix.to_owned()).or_insert(0) += 1;
|
||||
}
|
||||
}
|
||||
|
||||
clients.sort_unstable_by(|_, a, _, b| b.cmp(a));
|
||||
|
||||
#[cfg(feature = "prometheus")]
|
||||
if config.statistics.run_prometheus_endpoint
|
||||
&& config.statistics.prometheus_peer_id_prefixes
|
||||
{
|
||||
for (prefix, count) in prefixes {
|
||||
::metrics::gauge!(
|
||||
"aquatic_peer_id_prefixes",
|
||||
count as f64,
|
||||
"prefix_hex" => prefix.to_string(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
let mut client_vec = Vec::with_capacity(clients.len());
|
||||
|
||||
for (client, count) in clients {
|
||||
if config.statistics.write_html_to_file {
|
||||
client_vec.push((client.to_string(), count.to_formatted_string(&Locale::en)));
|
||||
}
|
||||
|
||||
#[cfg(feature = "prometheus")]
|
||||
if config.statistics.run_prometheus_endpoint {
|
||||
::metrics::gauge!(
|
||||
"aquatic_peer_clients",
|
||||
count as f64,
|
||||
"client" => client.to_string(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
client_vec
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
|
||||
if config.statistics.print_to_stdout {
|
||||
println!("General:");
|
||||
println!(
|
||||
" access list entries: {}",
|
||||
shared_state.access_list.load().len()
|
||||
);
|
||||
|
||||
if config.network.ipv4_active() {
|
||||
println!("IPv4:");
|
||||
print_to_stdout(&config, &statistics_ipv4);
|
||||
}
|
||||
if config.network.ipv6_active() {
|
||||
println!("IPv6:");
|
||||
print_to_stdout(&config, &statistics_ipv6);
|
||||
}
|
||||
|
||||
println!();
|
||||
}
|
||||
|
||||
if let Some(tt) = opt_tt.as_ref() {
|
||||
let template_data = TemplateData {
|
||||
stylesheet: STYLESHEET_CONTENTS.to_string(),
|
||||
ipv4_active: config.network.ipv4_active(),
|
||||
ipv6_active: config.network.ipv6_active(),
|
||||
extended_active: config.statistics.torrent_peer_histograms,
|
||||
ipv4: statistics_ipv4,
|
||||
ipv6: statistics_ipv6,
|
||||
last_updated: OffsetDateTime::now_utc()
|
||||
.format(&Rfc2822)
|
||||
.unwrap_or("(formatting error)".into()),
|
||||
peer_update_interval: format!("{}", config.cleaning.torrent_cleaning_interval),
|
||||
peer_clients,
|
||||
};
|
||||
|
||||
if let Err(err) = save_html_to_file(&config, tt, &template_data) {
|
||||
::log::error!("Couldn't save statistics to file: {:#}", err)
|
||||
}
|
||||
}
|
||||
|
||||
peers.shrink_to_fit();
|
||||
|
||||
if let Some(time_remaining) =
|
||||
Duration::from_secs(config.statistics.interval).checked_sub(start_time.elapsed())
|
||||
{
|
||||
::std::thread::sleep(time_remaining);
|
||||
} else {
|
||||
::log::warn!(
|
||||
"statistics interval not long enough to process all data, output may be misleading"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn print_to_stdout(config: &Config, statistics: &CollectedStatistics) {
|
||||
println!(
|
||||
" bandwidth: {:>7} Mbit/s in, {:7} Mbit/s out",
|
||||
statistics.rx_mbits, statistics.tx_mbits,
|
||||
);
|
||||
println!(" requests/second: {:>10}", statistics.requests_per_second);
|
||||
println!(" responses/second");
|
||||
println!(
|
||||
" total: {:>10}",
|
||||
statistics.responses_per_second_total
|
||||
);
|
||||
println!(
|
||||
" connect: {:>10}",
|
||||
statistics.responses_per_second_connect
|
||||
);
|
||||
println!(
|
||||
" announce: {:>10}",
|
||||
statistics.responses_per_second_announce
|
||||
);
|
||||
println!(
|
||||
" scrape: {:>10}",
|
||||
statistics.responses_per_second_scrape
|
||||
);
|
||||
println!(
|
||||
" error: {:>10}",
|
||||
statistics.responses_per_second_error
|
||||
);
|
||||
println!(" torrents: {:>10}", statistics.num_torrents);
|
||||
println!(
|
||||
" peers: {:>10} (updated every {}s)",
|
||||
statistics.num_peers, config.cleaning.torrent_cleaning_interval
|
||||
);
|
||||
|
||||
if config.statistics.torrent_peer_histograms {
|
||||
println!(
|
||||
" peers per torrent (updated every {}s)",
|
||||
config.cleaning.torrent_cleaning_interval
|
||||
);
|
||||
println!(" min {:>10}", statistics.peer_histogram.min);
|
||||
println!(" p10 {:>10}", statistics.peer_histogram.p10);
|
||||
println!(" p20 {:>10}", statistics.peer_histogram.p20);
|
||||
println!(" p30 {:>10}", statistics.peer_histogram.p30);
|
||||
println!(" p40 {:>10}", statistics.peer_histogram.p40);
|
||||
println!(" p50 {:>10}", statistics.peer_histogram.p50);
|
||||
println!(" p60 {:>10}", statistics.peer_histogram.p60);
|
||||
println!(" p70 {:>10}", statistics.peer_histogram.p70);
|
||||
println!(" p80 {:>10}", statistics.peer_histogram.p80);
|
||||
println!(" p90 {:>10}", statistics.peer_histogram.p90);
|
||||
println!(" p95 {:>10}", statistics.peer_histogram.p95);
|
||||
println!(" p99 {:>10}", statistics.peer_histogram.p99);
|
||||
println!(" p99.9 {:>10}", statistics.peer_histogram.p999);
|
||||
println!(" max {:>10}", statistics.peer_histogram.max);
|
||||
}
|
||||
}
|
||||
|
||||
fn save_html_to_file(
|
||||
config: &Config,
|
||||
tt: &TinyTemplate,
|
||||
template_data: &TemplateData,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut file = File::create(&config.statistics.html_file_path).with_context(|| {
|
||||
format!(
|
||||
"File path: {}",
|
||||
&config.statistics.html_file_path.to_string_lossy()
|
||||
)
|
||||
})?;
|
||||
|
||||
write!(file, "{}", tt.render(TEMPLATE_KEY, template_data)?)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue