Initial commit: aquatic, udp bittorrent tracker

This commit is contained in:
Joakim Frostegård 2020-04-04 22:08:32 +02:00
commit ace2e1a296
18 changed files with 1386 additions and 0 deletions

23
aquatic/Cargo.toml Normal file
View file

@@ -0,0 +1,23 @@
# Manifest for aquatic, a UDP BitTorrent tracker.
[package]
name = "aquatic"
version = "0.1.0"
authors = ["Joakim Frostegård <joakim.frostegard@gmail.com>"]
edition = "2018"
# Tracker executable entry point.
[[bin]]
name = "aquatic"
path = "src/main.rs"
[dependencies]
# Wire types and request/response (de)serialization for the UDP tracker protocol.
bittorrent_udp = { path = "../bittorrent_udp" }
# Concurrent hash maps for state shared between worker threads (see src/types.rs).
dashmap = "3"
# Insertion-ordered map used for per-torrent peer storage (PeerMap).
indexmap = "1"
# Socket builder with SO_REUSEPORT support (see src/network.rs).
net2 = "0.2"
# small_rng enables SmallRng, used for random peer sampling in src/handler.rs.
[dependencies.rand]
version = "0.7"
features = ["small_rng"]
# Event-driven UDP socket polling for the worker event loops.
[dependencies.mio]
version = "0.7"
features = ["udp", "os-poll", "os-util"]

134
aquatic/src/handler.rs Normal file
View file

@@ -0,0 +1,134 @@
use std::net::SocketAddr;
use std::sync::atomic::Ordering;
use std::time::Instant;
use rand::{self, SeedableRng, rngs::SmallRng, thread_rng};
use rand::seq::IteratorRandom;
use bittorrent_udp::types::*;
use crate::types::*;
/// Generate responses for a batch of connect and announce requests.
///
/// For every connect request, a fresh connection id is generated, stored
/// in the shared connection map (keyed by id plus source address) and
/// returned to the client.
///
/// Announce requests whose connection id was never handed out to the
/// sending address are silently dropped (no error response). A valid
/// announce inserts or updates the peer in the torrent's peer map — or
/// removes it when the peer reports `Stopped` — and adjusts the
/// seeder/leecher counters for both the new status and the status of any
/// replaced entry, so a re-announcing peer is not double-counted.
pub fn gen_responses(
    state: &State,
    connect_requests: Vec<(ConnectRequest, SocketAddr)>,
    announce_requests: Vec<(AnnounceRequest, SocketAddr)>
) -> Vec<(Response, SocketAddr)> {
    // One response per connect request and at most one per announce:
    // reserve the upper bound once instead of growing repeatedly.
    let mut responses = Vec::with_capacity(
        connect_requests.len() + announce_requests.len()
    );

    let now = Time(Instant::now());

    for (request, src) in connect_requests {
        let connection_id = ConnectionId(rand::random());

        let key = ConnectionKey {
            connection_id,
            socket_addr: src,
        };

        state.connections.insert(key, now);

        responses.push((Response::Connect(
            ConnectResponse {
                connection_id,
                transaction_id: request.transaction_id,
            }
        ), src));
    }

    for (request, src) in announce_requests {
        let connection_key = ConnectionKey {
            connection_id: request.connection_id,
            socket_addr: src,
        };

        // Ignore announces that don't present a previously issued
        // connection id for this source address.
        if !state.connections.contains_key(&connection_key){
            continue;
        }

        let mut torrent_data = state.torrents
            .entry(request.info_hash)
            .or_insert_with(TorrentData::default);

        let peer_key = PeerMapKey {
            ip: src.ip(),
            peer_id: request.peer_id,
        };

        let peer = Peer::from_announce_and_ip(&request, src.ip());
        let peer_status = peer.status;

        // A stopped peer leaves the map; otherwise insert/replace, keeping
        // hold of whichever entry (if any) this announce displaced.
        let opt_removed_peer = if peer.status == PeerStatus::Stopped {
            torrent_data.peers.remove(&peer_key)
        } else {
            torrent_data.peers.insert(peer_key, peer)
        };

        // Count the announcing peer's new status...
        match peer_status {
            PeerStatus::Leeching => {
                torrent_data.num_leechers.fetch_add(1, Ordering::SeqCst);
            },
            PeerStatus::Seeding => {
                torrent_data.num_seeders.fetch_add(1, Ordering::SeqCst);
            },
            PeerStatus::Stopped => {}
        };

        // ...and uncount the old status of any replaced/removed entry,
        // which keeps the counters consistent across re-announces.
        if let Some(removed_peer) = opt_removed_peer {
            match removed_peer.status {
                PeerStatus::Leeching => {
                    torrent_data.num_leechers.fetch_sub(1, Ordering::SeqCst);
                },
                PeerStatus::Seeding => {
                    torrent_data.num_seeders.fetch_sub(1, Ordering::SeqCst);
                },
                PeerStatus::Stopped => {}
            }
        }

        let response_peers = extract_response_peers(&torrent_data.peers, 100); // FIXME num peers

        let response = Response::Announce(AnnounceResponse {
            transaction_id: request.transaction_id,
            announce_interval: AnnounceInterval(
                600 // TODO: take from config (config.announce_interval as i32)
            ),
            leechers: NumberOfPeers(torrent_data.num_leechers.load(Ordering::SeqCst) as i32),
            seeders: NumberOfPeers(torrent_data.num_seeders.load(Ordering::SeqCst) as i32),
            peers: response_peers
        });

        responses.push((response, src));
    }

    responses
}
/// Collect peers to include in an announce response.
///
/// When the torrent has more peers than `number_of_peers_to_take`, a
/// uniform random sample is drawn from the whole map. (Selecting a
/// contiguous random range was tried instead, but the announcing peer
/// could then receive peers that are too homogeneous with respect to
/// insertion order.)
///
/// The announcing peer itself may appear in the result; that is fine.
pub fn extract_response_peers(
    peer_map: &PeerMap,
    number_of_peers_to_take: usize,
) -> Vec<ResponsePeer> {
    let all_peers = peer_map.values().map(Peer::to_response_peer);

    if peer_map.len() > number_of_peers_to_take {
        let mut rng = SmallRng::from_rng(thread_rng()).unwrap();

        all_peers.choose_multiple(&mut rng, number_of_peers_to_take)
    } else {
        all_peers.collect()
    }
}

25
aquatic/src/main.rs Normal file
View file

@@ -0,0 +1,25 @@
use std::time::Duration;
mod handler;
mod network;
mod types;
use types::State;
/// Bind the shared UDP socket, spawn three extra worker event loops and
/// run a fourth on the main thread.
fn main() {
    let addr = ([127, 0, 0, 1], 3000).into();
    let socket = network::create_socket(addr, 4096 * 8);
    let state = State::new();

    // Workers 1..=3 run on spawned threads; worker 0 runs below on the
    // main thread. All share the state and clones of the socket.
    for worker_index in 1..4 {
        let worker_socket = socket.try_clone().unwrap();
        let worker_state = state.clone();

        std::thread::spawn(move || {
            network::run_event_loop(
                worker_state,
                worker_socket,
                worker_index,
                4096,
                Duration::from_millis(1000),
            );
        });
    }

    network::run_event_loop(state, socket, 0, 4096, Duration::from_millis(1000));
}

150
aquatic/src/network.rs Normal file
View file

@@ -0,0 +1,150 @@
use std::net::SocketAddr;
use std::time::Duration;
use std::io::ErrorKind;
use mio::{Events, Poll, Interest, Token};
use mio::net::UdpSocket;
use net2::{UdpSocketExt, UdpBuilder};
use net2::unix::UnixUdpBuilderExt;
use bittorrent_udp::types::IpVersion;
use bittorrent_udp::converters::{response_to_bytes, request_from_bytes};
use crate::types::*;
use crate::handler::*;
/// Create a non-blocking std UDP socket bound to `addr` with
/// SO_REUSEPORT set, so multiple workers can bind the same address.
///
/// Panics if building, configuring or binding the socket fails. A failure
/// to enlarge the receive buffer to `recv_buffer_size` is only logged,
/// since the OS may cap it below the requested size.
pub fn create_socket(
    addr: SocketAddr,
    recv_buffer_size: usize,
) -> ::std::net::UdpSocket {
    let builder = if addr.is_ipv4() {
        UdpBuilder::new_v4().expect("socket: build")
    } else {
        UdpBuilder::new_v6().expect("socket: build")
    };

    builder.reuse_port(true)
        .expect("socket: set reuse port");

    // unwrap_or_else avoids allocating the panic message on success
    // (clippy: expect_fun_call).
    let socket = builder.bind(&addr)
        .unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", addr, err));

    socket.set_nonblocking(true)
        .expect("socket: set nonblocking");

    if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size){
        eprintln!(
            "socket: failed setting recv buffer to {}: {:?}",
            recv_buffer_size,
            err
        );
    }

    socket
}
/// Worker event loop: poll the UDP socket for readiness, drain incoming
/// datagrams into per-type request batches, hand them to the request
/// handler and send back the generated responses.
///
/// Several workers run this concurrently on clones of the same
/// SO_REUSEPORT socket; `token_num` is this worker's mio registration
/// token, used to filter events.
pub fn run_event_loop(
state: State,
socket: ::std::net::UdpSocket,
token_num: usize,
event_capacity: usize,
poll_timeout: Duration,
){
// Single reusable receive buffer; datagrams longer than 4096 bytes
// are truncated by recv_from.
let mut buffer = [0u8; 4096];
let mut socket = UdpSocket::from_std(socket);
let mut poll = Poll::new().expect("create poll");
let interests = Interest::READABLE | Interest::WRITABLE;
poll.registry()
.register(&mut socket, Token(token_num), interests)
.unwrap();
let mut events = Events::with_capacity(event_capacity);
loop {
poll.poll(&mut events, Some(poll_timeout))
.expect("failed polling");
for event in events.iter(){
let token = event.token();
// Only react to this worker's own registration token.
if token.0 == token_num {
if event.is_readable(){
let mut connect_requests: Vec<(ConnectRequest, SocketAddr)> = Vec::with_capacity(event_capacity);
let mut announce_requests: Vec<(AnnounceRequest, SocketAddr)> = Vec::with_capacity(event_capacity);
// Drain the socket until it would block, batching requests by type.
loop {
match socket.recv_from(&mut buffer) {
Ok((amt, src)) => {
let request = request_from_bytes(
&buffer[..amt],
// presumably a max-scrape-torrents limit for the parser —
// confirm against bittorrent_udp::converters.
255u8
);
match request {
Request::Connect(r) => {
connect_requests.push((r, src));
},
Request::Announce(r) => {
announce_requests.push((r, src));
},
_ => {
// FIXME: other request types (e.g. scrape) are silently dropped.
}
}
},
Err(err) => {
match err.kind() {
// Non-blocking socket is drained.
ErrorKind::WouldBlock => {
break;
},
err => {
eprintln!("recv_from error: {:?}", err);
break;
}
}
}
}
}
let responses = gen_responses(
&state,
connect_requests,
announce_requests
);
for (response, src) in responses {
// NOTE(review): IpVersion::IPv4 is hardcoded even though create_socket
// can bind an IPv6 address — confirm this is intended.
let bytes = response_to_bytes(&response, IpVersion::IPv4);
match socket.send_to(&bytes[..], src){
Ok(_bytes_sent) => {
},
Err(err) => {
match err.kind(){
// Send buffer full: drop the remaining responses for this batch.
ErrorKind::WouldBlock => {
break;
},
err => {
eprintln!("send_to error: {:?}", err);
break;
}
}
}
}
}
// Rearm interest for the next round of readiness events.
poll.registry().reregister(&mut socket, token, interests).unwrap();
}
}
}
}
}

129
aquatic/src/types.rs Normal file
View file

@@ -0,0 +1,129 @@
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
use std::net::{SocketAddr, IpAddr};
use std::time::Instant;
use dashmap::DashMap;
use indexmap::IndexMap;
pub use bittorrent_udp::types::*;
/// Point in time (monotonic `Instant`) used for connection bookkeeping.
#[derive(Debug, Clone, Copy)]
pub struct Time(pub Instant);
/// Identifies one handed-out connection: the id returned in the connect
/// response together with the client's socket address.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ConnectionKey {
pub connection_id: ConnectionId,
pub socket_addr: SocketAddr
}
/// Connection key -> creation time, shared between worker threads.
pub type ConnectionMap = DashMap<ConnectionKey, Time>;
/// A peer's role in a swarm, as reported by its last announce.
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub enum PeerStatus {
    Seeding,
    Leeching,
    Stopped
}

impl PeerStatus {
    /// Derive a peer's status from the announce event and the number of
    /// bytes it reports as remaining.
    ///
    /// The common case (a regular announce from a still-downloading peer)
    /// is expected to land in the `Leeching` arm.
    pub fn from_event_and_bytes_left(
        event: AnnounceEvent,
        bytes_left: NumberOfBytes
    ) -> Self {
        match (event == AnnounceEvent::Stopped, bytes_left.0 == 0) {
            (true, _) => Self::Stopped,
            (false, true) => Self::Seeding,
            (false, false) => Self::Leeching,
        }
    }
}
/// A peer registered for a torrent, recorded from its latest announce.
#[derive(Clone, Debug)]
pub struct Peer {
pub id: PeerId,
pub connection_id: ConnectionId,
// The address the announce was actually received from, not a
// client-reported value (see Peer::from_announce_and_ip callers).
pub ip_address: IpAddr,
pub port: Port,
pub status: PeerStatus,
// When this peer last announced; used for bookkeeping.
pub last_announce: Time
}
impl Peer {
    /// Convert to the compact peer representation included in announce
    /// responses (address and port only).
    pub fn to_response_peer(&self) -> ResponsePeer {
        let ip_address = self.ip_address;
        let port = self.port;

        ResponsePeer { ip_address, port }
    }

    /// Build a peer record from an announce request and the resolved
    /// source IP address, stamped with the current time.
    pub fn from_announce_and_ip(
        announce_request: &AnnounceRequest,
        ip_address: IpAddr
    ) -> Self {
        let status = PeerStatus::from_event_and_bytes_left(
            announce_request.event,
            announce_request.bytes_left
        );
        let last_announce = Time(Instant::now());

        Self {
            id: announce_request.peer_id,
            connection_id: announce_request.connection_id,
            ip_address,
            port: announce_request.port,
            status,
            last_announce
        }
    }
}
/// Key for a peer within a torrent's peer map: the announce source IP
/// plus the client-chosen peer id.
#[derive(PartialEq, Eq, Hash, Clone)]
pub struct PeerMapKey {
pub ip: IpAddr,
pub peer_id: PeerId
}
/// Insertion-ordered map of all peers known for one torrent.
pub type PeerMap = IndexMap<PeerMapKey, Peer>;
/// Per-torrent state: its peers plus seeder/leecher tallies.
///
/// The counters are atomics so they can be updated through a shared map
/// entry; the request handler keeps them in sync with insertions and
/// removals in `peers`.
pub struct TorrentData {
pub peers: PeerMap,
pub num_seeders: AtomicUsize,
pub num_leechers: AtomicUsize,
}
impl Default for TorrentData {
fn default() -> Self {
Self {
peers: IndexMap::new(),
num_seeders: AtomicUsize::new(0),
num_leechers: AtomicUsize::new(0),
}
}
}
/// Info hash -> per-torrent state, shared between worker threads.
pub type TorrentMap = DashMap<InfoHash, TorrentData>;
/// Tracker state shared across worker threads; cloning is cheap because
/// both maps sit behind `Arc`.
#[derive(Clone)]
pub struct State {
pub connections: Arc<ConnectionMap>,
pub torrents: Arc<TorrentMap>,
}
impl State {
pub fn new() -> Self {
Self {
connections: Arc::new(DashMap::new()),
torrents: Arc::new(DashMap::new()),
}
}
}