aquatic: don't use atomic usizes in torrent state

Doesn't make sense any more, since a Mutex is now used rather than
dashmap with its RwLock-like functionality.
This commit is contained in:
Joakim Frostegård 2020-05-06 00:27:16 +02:00
parent ce1fc8c1b3
commit f9428801bf
4 changed files with 17 additions and 22 deletions

View file

@ -1,7 +1,6 @@
# TODO
## aquatic
* don't use atomic usizes in torrent state, doesn't make sense anymore
* mio: set oneshot for epoll and kqueue? otherwise, stop reregistering?
* Handle Ipv4 and Ipv6 peers. Probably split torrent state. Ipv4 peers
can't make use of Ipv6 ones. Ipv6 ones may or may not be able to make

View file

@ -97,8 +97,8 @@ pub type PeerMap = IndexMap<PeerMapKey, Peer>;
pub struct TorrentData {
pub peers: PeerMap,
pub num_seeders: AtomicUsize,
pub num_leechers: AtomicUsize,
pub num_seeders: usize,
pub num_leechers: usize,
}
@ -106,8 +106,8 @@ impl Default for TorrentData {
fn default() -> Self {
Self {
peers: IndexMap::new(),
num_seeders: AtomicUsize::new(0),
num_leechers: AtomicUsize::new(0),
num_seeders: 0,
num_leechers: 0,
}
}
}

View file

@ -1,5 +1,4 @@
use std::net::SocketAddr;
use std::sync::atomic::Ordering;
use std::time::Duration;
use std::vec::Drain;
@ -233,20 +232,20 @@ pub fn handle_announce_requests(
match peer_status {
PeerStatus::Leeching => {
torrent_data.num_leechers.fetch_add(1, Ordering::SeqCst);
torrent_data.num_leechers += 1;
},
PeerStatus::Seeding => {
torrent_data.num_seeders.fetch_add(1, Ordering::SeqCst);
torrent_data.num_seeders += 1;
},
PeerStatus::Stopped => {}
};
match opt_removed_peer_status {
Some(PeerStatus::Leeching) => {
torrent_data.num_leechers.fetch_sub(1, Ordering::SeqCst);
torrent_data.num_leechers -= 1;
},
Some(PeerStatus::Seeding) => {
torrent_data.num_seeders.fetch_sub(1, Ordering::SeqCst);
torrent_data.num_seeders -= 1;
},
_ => {}
}
@ -260,8 +259,8 @@ pub fn handle_announce_requests(
let response = Response::Announce(AnnounceResponse {
transaction_id: request.transaction_id,
announce_interval: AnnounceInterval(config.network.peer_announce_interval),
leechers: NumberOfPeers(torrent_data.num_leechers.load(Ordering::SeqCst) as i32),
seeders: NumberOfPeers(torrent_data.num_seeders.load(Ordering::SeqCst) as i32),
leechers: NumberOfPeers(torrent_data.num_leechers as i32),
seeders: NumberOfPeers(torrent_data.num_seeders as i32),
peers: response_peers
});
@ -286,8 +285,8 @@ pub fn handle_scrape_requests(
for info_hash in request.info_hashes.iter() {
if let Some(torrent_data) = torrents.get(info_hash){
stats.push(create_torrent_scrape_statistics(
torrent_data.num_seeders.load(Ordering::SeqCst) as i32,
torrent_data.num_leechers.load(Ordering::SeqCst) as i32,
torrent_data.num_seeders as i32,
torrent_data.num_leechers as i32,
));
} else {
stats.push(empty_stats);

View file

@ -20,8 +20,8 @@ pub fn clean_connections_and_torrents(state: &State){
let mut torrents = state.torrents.lock();
torrents.retain(|_, torrent| {
let num_seeders = &torrent.num_seeders;
let num_leechers = &torrent.num_leechers;
let num_seeders = &mut torrent.num_seeders;
let num_leechers = &mut torrent.num_leechers;
torrent.peers.retain(|_, peer| {
let keep = peer.valid_until.0 > now;
@ -29,10 +29,10 @@ pub fn clean_connections_and_torrents(state: &State){
if !keep {
match peer.status {
PeerStatus::Seeding => {
num_seeders.fetch_sub(1, Ordering::SeqCst);
*num_seeders -= 1;
},
PeerStatus::Leeching => {
num_leechers.fetch_sub(1, Ordering::SeqCst);
*num_leechers -= 1;
},
_ => (),
};
@ -94,10 +94,7 @@ pub fn gather_and_print_statistics(
let torrents = &mut state.torrents.lock();
for torrent in torrents.values(){
let num_seeders = torrent.num_seeders.load(Ordering::SeqCst);
let num_leechers = torrent.num_leechers.load(Ordering::SeqCst);
let num_peers = (num_seeders + num_leechers) as u64;
let num_peers = (torrent.num_seeders + torrent.num_leechers) as u64;
if let Err(err) = peers_per_torrent.increment(num_peers){
eprintln!("error incrementing peers_per_torrent histogram: {}", err)