ws: use idiomatic folder structure

This commit is contained in:
Joakim Frostegård 2021-11-27 18:19:08 +01:00
parent aa3253fcd6
commit d20e57d861
17 changed files with 0 additions and 2 deletions

View file

@ -0,0 +1,10 @@
use std::sync::Arc;
use aquatic_common::access_list::AccessListArcSwap;
/// TLS server configuration type used when accepting connections.
pub type TlsConfig = futures_rustls::rustls::ServerConfig;

/// Shared application state handed (cloned) to each worker.
#[derive(Default, Clone)]
pub struct State {
    // Torrent access list; the `ArcSwap` wrapper suggests it can be
    // replaced atomically at runtime (e.g. on reload) — shared by Arc
    pub access_list: Arc<AccessListArcSwap>,
}

View file

@ -0,0 +1,114 @@
use std::cell::RefCell;
use std::rc::Rc;
use std::time::Duration;
use futures_lite::StreamExt;
use glommio::channels::channel_mesh::{MeshBuilder, Partial, Role, Senders};
use glommio::enclose;
use glommio::prelude::*;
use glommio::timer::TimerActionRepeat;
use rand::{rngs::SmallRng, SeedableRng};
use aquatic_ws_protocol::*;
use crate::common::handlers::*;
use crate::common::*;
use crate::config::Config;
use super::common::State;
/// Run a request worker: consume in-messages (announce/scrape requests)
/// from socket workers over the channel mesh, update shared torrent state,
/// and produce out-messages (responses) routed back to socket workers.
///
/// Does not return under normal operation: it awaits one stream-handling
/// task per connected socket worker.
pub async fn run_request_worker(
    config: Config,
    state: State,
    in_message_mesh_builder: MeshBuilder<(ConnectionMeta, InMessage), Partial>,
    out_message_mesh_builder: MeshBuilder<(ConnectionMeta, OutMessage), Partial>,
) {
    // This worker only consumes in-messages and only produces out-messages
    let (_, mut in_message_receivers) = in_message_mesh_builder.join(Role::Consumer).await.unwrap();
    let (out_message_senders, _) = out_message_mesh_builder.join(Role::Producer).await.unwrap();

    let out_message_senders = Rc::new(out_message_senders);

    // Torrent state local to this worker, shared between handler tasks and
    // the cleaning timer via Rc<RefCell<..>>
    let torrents = Rc::new(RefCell::new(TorrentMaps::default()));
    let access_list = state.access_list;

    // Periodically clean torrents
    TimerActionRepeat::repeat(enclose!((config, torrents, access_list) move || {
        enclose!((config, torrents, access_list) move || async move {
            torrents.borrow_mut().clean(&config, &access_list);

            // Returning Some(..) keeps the timer repeating
            Some(Duration::from_secs(config.cleaning.torrent_cleaning_interval))
        })()
    }));

    let mut handles = Vec::new();

    // Spawn one handler task per socket worker stream
    for (_, receiver) in in_message_receivers.streams() {
        let handle = spawn_local(handle_request_stream(
            config.clone(),
            torrents.clone(),
            out_message_senders.clone(),
            receiver,
        ))
        .detach();

        handles.push(handle);
    }

    for handle in handles {
        handle.await;
    }
}
/// Handle one socket worker's stream of in-messages: dispatch each request
/// to its handler, then forward every generated out-message to the socket
/// worker that owns the originating connection.
async fn handle_request_stream<S>(
    config: Config,
    torrents: Rc<RefCell<TorrentMaps>>,
    out_message_senders: Rc<Senders<(ConnectionMeta, OutMessage)>>,
    mut stream: S,
) where
    S: futures_lite::Stream<Item = (ConnectionMeta, InMessage)> + ::std::marker::Unpin,
{
    let mut rng = SmallRng::from_entropy();

    let max_peer_age = config.cleaning.max_peer_age;
    // Shared expiry timestamp for newly inserted peers; refreshed once per
    // second by the timer below instead of being recomputed per request
    let peer_valid_until = Rc::new(RefCell::new(ValidUntil::new(max_peer_age)));

    TimerActionRepeat::repeat(enclose!((peer_valid_until) move || {
        enclose!((peer_valid_until) move || async move {
            *peer_valid_until.borrow_mut() = ValidUntil::new(max_peer_age);

            Some(Duration::from_secs(1))
        })()
    }));

    // Response buffer reused across iterations (drained each time)
    let mut out_messages = Vec::new();

    while let Some((meta, in_message)) = stream.next().await {
        match in_message {
            InMessage::AnnounceRequest(request) => handle_announce_request(
                &config,
                &mut rng,
                &mut torrents.borrow_mut(),
                &mut out_messages,
                peer_valid_until.borrow().to_owned(),
                meta,
                request,
            ),
            InMessage::ScrapeRequest(request) => handle_scrape_request(
                &config,
                &mut torrents.borrow_mut(),
                &mut out_messages,
                meta,
                request,
            ),
        };

        // Route each response to the socket worker identified in its meta
        for (meta, out_message) in out_messages.drain(..) {
            out_message_senders
                .send_to(meta.out_message_consumer_id.0, (meta, out_message))
                .await
                .expect("failed sending out_message to socket worker");
        }

        // Cooperatively yield so other tasks on this executor can run
        yield_if_needed().await;
    }
}

View file

@ -0,0 +1,140 @@
pub mod common;
pub mod handlers;
pub mod network;
use std::{
fs::File,
io::BufReader,
sync::{atomic::AtomicUsize, Arc},
};
use crate::config::Config;
#[cfg(feature = "cpu-pinning")]
use aquatic_common::cpu_pinning::{pin_current_if_configured_to, WorkerIndex};
use aquatic_common::privileges::drop_privileges_after_socket_binding;
use self::common::*;
use glommio::{channels::channel_mesh::MeshBuilder, prelude::*};
const SHARED_CHANNEL_SIZE: usize = 1024;
/// Spawn socket worker and request worker executors, wire them together
/// with channel meshes, drop privileges after sockets are bound, and block
/// until every executor has exited.
pub fn run_inner(config: Config, state: State) -> anyhow::Result<()> {
    let num_peers = config.socket_workers + config.request_workers;

    // Partial meshes: each worker joins as either producer or consumer
    let request_mesh_builder = MeshBuilder::partial(num_peers, SHARED_CHANNEL_SIZE);
    let response_mesh_builder = MeshBuilder::partial(num_peers, SHARED_CHANNEL_SIZE);

    // Incremented by each socket worker once its listener is bound; used to
    // decide when dropping privileges is safe
    let num_bound_sockets = Arc::new(AtomicUsize::new(0));

    let tls_config = Arc::new(create_tls_config(&config).unwrap());

    let mut executors = Vec::new();

    for i in 0..(config.socket_workers) {
        let config = config.clone();
        let state = state.clone();
        let tls_config = tls_config.clone();
        let request_mesh_builder = request_mesh_builder.clone();
        let response_mesh_builder = response_mesh_builder.clone();
        let num_bound_sockets = num_bound_sockets.clone();

        let builder = LocalExecutorBuilder::default().name("socket");

        let executor = builder.spawn(move || async move {
            #[cfg(feature = "cpu-pinning")]
            pin_current_if_configured_to(
                &config.cpu_pinning,
                config.socket_workers,
                WorkerIndex::SocketWorker(i),
            );

            network::run_socket_worker(
                config,
                state,
                tls_config,
                request_mesh_builder,
                response_mesh_builder,
                num_bound_sockets,
            )
            .await
        });

        executors.push(executor);
    }

    for i in 0..(config.request_workers) {
        let config = config.clone();
        let state = state.clone();
        let request_mesh_builder = request_mesh_builder.clone();
        let response_mesh_builder = response_mesh_builder.clone();

        let builder = LocalExecutorBuilder::default().name("request");

        let executor = builder.spawn(move || async move {
            #[cfg(feature = "cpu-pinning")]
            pin_current_if_configured_to(
                &config.cpu_pinning,
                config.socket_workers,
                WorkerIndex::RequestWorker(i),
            );

            handlers::run_request_worker(config, state, request_mesh_builder, response_mesh_builder)
                .await
        });

        executors.push(executor);
    }

    // Presumably waits until num_bound_sockets reaches config.socket_workers
    // before dropping root privileges — see aquatic_common::privileges
    drop_privileges_after_socket_binding(
        &config.privileges,
        num_bound_sockets,
        config.socket_workers,
    )
    .unwrap();

    #[cfg(feature = "cpu-pinning")]
    pin_current_if_configured_to(
        &config.cpu_pinning,
        config.socket_workers,
        WorkerIndex::Other,
    );

    for executor in executors {
        executor
            .expect("failed to spawn local executor")
            .join()
            .unwrap();
    }

    Ok(())
}
/// Build a rustls `ServerConfig` from the certificate chain and private key
/// files referenced in the tracker configuration.
///
/// Reads the full PEM certificate chain and the first PKCS#8 private key in
/// the key file.
///
/// # Errors
///
/// Returns an error if either file can't be opened or parsed, if the key
/// file contains no PKCS#8 keys, or if rustls rejects the cert/key pair.
fn create_tls_config(config: &Config) -> anyhow::Result<TlsConfig> {
    let certs = {
        let f = File::open(&config.network.tls_certificate_path)?;
        let mut f = BufReader::new(f);

        rustls_pemfile::certs(&mut f)?
            .into_iter()
            .map(futures_rustls::rustls::Certificate)
            .collect()
    };

    let private_key = {
        let f = File::open(&config.network.tls_private_key_path)?;
        let mut f = BufReader::new(f);

        // Note: only "BEGIN PRIVATE KEY" (PKCS#8) entries are recognized;
        // take ownership of the first one instead of cloning it
        rustls_pemfile::pkcs8_private_keys(&mut f)?
            .into_iter()
            .next()
            .map(futures_rustls::rustls::PrivateKey)
            // ok_or_else: don't build the error value on the success path
            .ok_or_else(|| anyhow::anyhow!("No private keys in file"))?
    };

    let tls_config = futures_rustls::rustls::ServerConfig::builder()
        .with_safe_defaults()
        .with_no_client_auth()
        .with_single_cert(certs, private_key)?;

    Ok(tls_config)
}

View file

@ -0,0 +1,443 @@
use std::borrow::Cow;
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::net::SocketAddr;
use std::rc::Rc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;
use aquatic_common::access_list::{create_access_list_cache, AccessListArcSwap, AccessListCache};
use aquatic_common::convert_ipv4_mapped_ipv6;
use aquatic_ws_protocol::*;
use async_tungstenite::WebSocketStream;
use futures::stream::{SplitSink, SplitStream};
use futures_lite::future::race;
use futures_lite::StreamExt;
use futures_rustls::server::TlsStream;
use futures_rustls::TlsAcceptor;
use glommio::channels::channel_mesh::{MeshBuilder, Partial, Role, Senders};
use glommio::channels::local_channel::{new_unbounded, LocalReceiver, LocalSender};
use glommio::channels::shared_channel::ConnectedReceiver;
use glommio::net::{TcpListener, TcpStream};
use glommio::timer::TimerActionRepeat;
use glommio::{enclose, prelude::*};
use hashbrown::HashMap;
use slab::Slab;
use crate::config::Config;
use crate::common::*;
use super::common::*;
/// A scrape response under assembly: one client scrape request may be split
/// across several request workers, and their partial responses are merged
/// into this structure before the combined response is sent.
struct PendingScrapeResponse {
    // Number of worker out-messages still outstanding before the merged
    // response can be sent
    pending_worker_out_messages: usize,
    // Statistics collected so far, keyed by info hash
    stats: HashMap<InfoHash, ScrapeStatistics>,
}
/// Slab entry for an open connection: the local channel endpoint used to
/// deliver out-messages to that connection's writer task.
struct ConnectionReference {
    out_message_sender: Rc<LocalSender<(ConnectionMeta, OutMessage)>>,
}
/// Run a socket worker: accept TCP connections, run TLS + WebSocket
/// handshakes in per-connection tasks, forward parsed in-messages to
/// request workers, and deliver their out-messages back to the right
/// connection.
pub async fn run_socket_worker(
    config: Config,
    state: State,
    tls_config: Arc<TlsConfig>,
    in_message_mesh_builder: MeshBuilder<(ConnectionMeta, InMessage), Partial>,
    out_message_mesh_builder: MeshBuilder<(ConnectionMeta, OutMessage), Partial>,
    num_bound_sockets: Arc<AtomicUsize>,
) {
    let config = Rc::new(config);
    let access_list = state.access_list;

    let listener = TcpListener::bind(config.network.address).expect("bind socket");
    // Report the bound socket so the main thread can drop privileges once
    // all socket workers have reported in
    num_bound_sockets.fetch_add(1, Ordering::SeqCst);

    // This worker produces in-messages and consumes out-messages
    let (in_message_senders, _) = in_message_mesh_builder.join(Role::Producer).await.unwrap();
    let in_message_senders = Rc::new(in_message_senders);

    let (_, mut out_message_receivers) =
        out_message_mesh_builder.join(Role::Consumer).await.unwrap();
    // Identifies this worker so request workers can route responses back
    let out_message_consumer_id = ConsumerId(out_message_receivers.consumer_id().unwrap());

    // Open connections, keyed by slab index; connection tasks push their
    // key into connections_to_remove when they finish
    let connection_slab = Rc::new(RefCell::new(Slab::new()));
    let connections_to_remove = Rc::new(RefCell::new(Vec::new()));

    // Periodically remove closed connections
    TimerActionRepeat::repeat(
        enclose!((config, connection_slab, connections_to_remove) move || {
            remove_closed_connections(
                config.clone(),
                connection_slab.clone(),
                connections_to_remove.clone(),
            )
        }),
    );

    // One task per request worker stream, fanning its out-messages out to
    // the per-connection local channels
    for (_, out_message_receiver) in out_message_receivers.streams() {
        spawn_local(receive_out_messages(
            out_message_receiver,
            connection_slab.clone(),
        ))
        .detach();
    }

    let mut incoming = listener.incoming();

    while let Some(stream) = incoming.next().await {
        match stream {
            Ok(stream) => {
                // Unbounded local channel carrying out-messages destined
                // for this particular connection
                let (out_message_sender, out_message_receiver) = new_unbounded();

                let out_message_sender = Rc::new(out_message_sender);

                let key = RefCell::borrow_mut(&connection_slab).insert(ConnectionReference {
                    out_message_sender: out_message_sender.clone(),
                });

                spawn_local(enclose!((config, access_list, in_message_senders, tls_config, connections_to_remove) async move {
                    if let Err(err) = Connection::run(
                        config,
                        access_list,
                        in_message_senders,
                        out_message_sender,
                        out_message_receiver,
                        out_message_consumer_id,
                        ConnectionId(key),
                        tls_config,
                        stream
                    ).await {
                        ::log::debug!("Connection::run() error: {:?}", err);
                    }

                    // Mark this connection's slab entry for cleanup by the
                    // periodic timer above
                    RefCell::borrow_mut(&connections_to_remove).push(key);
                }))
                .detach();
            }
            Err(err) => {
                ::log::error!("accept connection: {:?}", err);
            }
        }
    }
}
/// Remove slab entries for connections whose tasks have finished.
///
/// Runs periodically via `TimerActionRepeat`; the returned duration is the
/// delay until the next invocation (returning `None` would stop the timer,
/// which this function never does).
async fn remove_closed_connections(
    config: Rc<Config>,
    connection_slab: Rc<RefCell<Slab<ConnectionReference>>>,
    connections_to_remove: Rc<RefCell<Vec<usize>>>,
) -> Option<Duration> {
    // Swap out the pending list, leaving a fresh empty Vec for connection
    // tasks to push into while we work
    let connections_to_remove = connections_to_remove.replace(Vec::new());

    for connection_id in connections_to_remove {
        // is_some() instead of `if let Some(_)` (clippy:
        // redundant_pattern_matching)
        if RefCell::borrow_mut(&connection_slab)
            .try_remove(connection_id)
            .is_some()
        {
            ::log::debug!("removed connection with id {}", connection_id);
        } else {
            ::log::error!(
                "couldn't remove connection with id {}, it is not in connection slab",
                connection_id
            );
        }
    }

    // NOTE(review): reuses the torrent cleaning interval for connection
    // cleanup — confirm this is intentional rather than a missing
    // dedicated config setting
    Some(Duration::from_secs(
        config.cleaning.torrent_cleaning_interval,
    ))
}
/// Forward out-messages arriving from one request worker to the local
/// channel of the connection each message belongs to. Messages addressed to
/// connections no longer present in the slab are silently dropped.
async fn receive_out_messages(
    mut out_message_receiver: ConnectedReceiver<(ConnectionMeta, OutMessage)>,
    connection_references: Rc<RefCell<Slab<ConnectionReference>>>,
) {
    while let Some(channel_out_message) = out_message_receiver.next().await {
        if let Some(reference) = connection_references
            .borrow()
            .get(channel_out_message.0.connection_id.0)
        {
            match reference.out_message_sender.try_send(channel_out_message) {
                // A closed receiver just means the connection went away;
                // not worth logging
                Ok(()) | Err(GlommioError::Closed(_)) => {}
                Err(err) => {
                    ::log::error!(
                        "Couldn't send out_message from shared channel to local receiver: {:?}",
                        err
                    );
                }
            }
        }
    }
}
/// Marker type grouping the per-connection setup logic.
struct Connection;

impl Connection {
    /// Perform the TLS and WebSocket handshakes on an accepted TCP stream,
    /// split it, and run reader and writer tasks until either one finishes
    /// (or fails); that task's result becomes this function's result.
    async fn run(
        config: Rc<Config>,
        access_list: Arc<AccessListArcSwap>,
        in_message_senders: Rc<Senders<(ConnectionMeta, InMessage)>>,
        out_message_sender: Rc<LocalSender<(ConnectionMeta, OutMessage)>>,
        out_message_receiver: LocalReceiver<(ConnectionMeta, OutMessage)>,
        out_message_consumer_id: ConsumerId,
        connection_id: ConnectionId,
        tls_config: Arc<TlsConfig>,
        stream: TcpStream,
    ) -> anyhow::Result<()> {
        let peer_addr = stream
            .peer_addr()
            .map_err(|err| anyhow::anyhow!("Couldn't get peer addr: {:?}", err))?;

        // TLS handshake
        let tls_acceptor: TlsAcceptor = tls_config.into();

        let stream = tls_acceptor.accept(stream).await?;

        // WebSocket handshake with configured frame/message size limits
        let ws_config = tungstenite::protocol::WebSocketConfig {
            max_frame_size: Some(config.network.websocket_max_frame_size),
            max_message_size: Some(config.network.websocket_max_message_size),
            ..Default::default()
        };
        let stream = async_tungstenite::accept_async_with_config(stream, Some(ws_config)).await?;

        let (ws_out, ws_in) = futures::StreamExt::split(stream);

        // Shared between the reader (which registers pending scrapes) and
        // the writer (which completes them)
        let pending_scrape_slab = Rc::new(RefCell::new(Slab::new()));
        let access_list_cache = create_access_list_cache(&access_list);

        let reader_handle = spawn_local(enclose!((pending_scrape_slab) async move {
            let mut reader = ConnectionReader {
                config,
                access_list_cache,
                in_message_senders,
                out_message_sender,
                pending_scrape_slab,
                out_message_consumer_id,
                ws_in,
                peer_addr,
                connection_id,
            };

            reader.run_in_message_loop().await
        }))
        .detach();
        let writer_handle = spawn_local(async move {
            let mut writer = ConnectionWriter {
                out_message_receiver,
                ws_out,
                pending_scrape_slab,
                peer_addr,
            };

            writer.run_out_message_loop().await
        })
        .detach();

        // Complete as soon as either half finishes
        race(reader_handle, writer_handle).await.unwrap()
    }
}
/// Reading half of a connection: parses incoming WebSocket messages into
/// in-messages and forwards them to the responsible request workers.
struct ConnectionReader {
    config: Rc<Config>,
    // Cached view of the shared torrent access list
    access_list_cache: AccessListCache,
    in_message_senders: Rc<Senders<(ConnectionMeta, InMessage)>>,
    // For sending error responses straight to this connection's writer
    out_message_sender: Rc<LocalSender<(ConnectionMeta, OutMessage)>>,
    // Scrape responses being assembled from multiple request workers
    pending_scrape_slab: Rc<RefCell<Slab<PendingScrapeResponse>>>,
    out_message_consumer_id: ConsumerId,
    ws_in: SplitStream<WebSocketStream<TlsStream<TcpStream>>>,
    peer_addr: SocketAddr,
    connection_id: ConnectionId,
}
impl ConnectionReader {
    /// Read and handle WebSocket messages until the peer disconnects or a
    /// fatal error occurs. Messages that fail to parse trigger an error
    /// response but do not terminate the connection.
    async fn run_in_message_loop(&mut self) -> anyhow::Result<()> {
        loop {
            ::log::debug!("read_in_message");

            // The stream yielding `None` means it has ended (peer closed
            // the connection). Previously this was `.unwrap()`, which
            // panicked the task on a normal disconnect; return an error
            // instead so Connection::run logs it at debug level.
            let message = match self.ws_in.next().await {
                Some(message) => message?,
                None => return Err(anyhow::anyhow!("websocket stream ended")),
            };

            match InMessage::from_ws_message(message) {
                Ok(in_message) => {
                    ::log::debug!("received in_message: {:?}", in_message);

                    self.handle_in_message(in_message).await?;
                }
                Err(err) => {
                    ::log::debug!("Couldn't parse in_message: {:?}", err);

                    self.send_error_response("Invalid request".into(), None);
                }
            }
        }
    }

    /// Dispatch a parsed in-message.
    ///
    /// Announce requests are checked against the access list and forwarded
    /// to the request worker responsible for the info hash. Scrape requests
    /// are split by responsible worker; a `PendingScrapeResponse` records
    /// how many partial responses the writer half must still collect.
    async fn handle_in_message(&mut self, in_message: InMessage) -> anyhow::Result<()> {
        match in_message {
            InMessage::AnnounceRequest(announce_request) => {
                let info_hash = announce_request.info_hash;

                if self
                    .access_list_cache
                    .load()
                    .allows(self.config.access_list.mode, &info_hash.0)
                {
                    let in_message = InMessage::AnnounceRequest(announce_request);

                    let consumer_index =
                        calculate_in_message_consumer_index(&self.config, info_hash);

                    // Only fails when receiver is closed
                    self.in_message_senders
                        .send_to(
                            consumer_index,
                            (self.make_connection_meta(None), in_message),
                        )
                        .await
                        .unwrap();
                } else {
                    self.send_error_response("Info hash not allowed".into(), Some(info_hash));
                }
            }
            InMessage::ScrapeRequest(ScrapeRequest { info_hashes, .. }) => {
                let info_hashes = if let Some(info_hashes) = info_hashes {
                    info_hashes
                } else {
                    // If request.info_hashes is empty, don't return scrape for all
                    // torrents, even though reference server does it. It is too expensive.
                    self.send_error_response("Full scrapes are not allowed".into(), None);

                    return Ok(());
                };

                // Group the requested info hashes by owning request worker
                let mut info_hashes_by_worker: BTreeMap<usize, Vec<InfoHash>> = BTreeMap::new();

                for info_hash in info_hashes.as_vec() {
                    let info_hashes = info_hashes_by_worker
                        .entry(calculate_in_message_consumer_index(&self.config, info_hash))
                        .or_default();

                    info_hashes.push(info_hash);
                }

                let pending_worker_out_messages = info_hashes_by_worker.len();

                let pending_scrape_response = PendingScrapeResponse {
                    pending_worker_out_messages,
                    stats: Default::default(),
                };

                // Register the pending response so the writer half can
                // merge the partial responses as they arrive
                let pending_scrape_id = PendingScrapeId(
                    RefCell::borrow_mut(&mut self.pending_scrape_slab)
                        .insert(pending_scrape_response),
                );
                let meta = self.make_connection_meta(Some(pending_scrape_id));

                for (consumer_index, info_hashes) in info_hashes_by_worker {
                    let in_message = InMessage::ScrapeRequest(ScrapeRequest {
                        action: ScrapeAction,
                        info_hashes: Some(ScrapeRequestInfoHashes::Multiple(info_hashes)),
                    });

                    // Only fails when receiver is closed
                    self.in_message_senders
                        .send_to(consumer_index, (meta, in_message))
                        .await
                        .unwrap();
                }
            }
        }

        Ok(())
    }

    /// Queue an error response for delivery to this connection's writer.
    ///
    /// NOTE(review): the action is always `Scrape`, even when the failing
    /// request was an announce or unparsable — confirm intentional.
    fn send_error_response(&self, failure_reason: Cow<'static, str>, info_hash: Option<InfoHash>) {
        let out_message = OutMessage::ErrorResponse(ErrorResponse {
            action: Some(ErrorResponseAction::Scrape),
            failure_reason,
            info_hash,
        });

        if let Err(err) = self
            .out_message_sender
            .try_send((self.make_connection_meta(None), out_message))
        {
            ::log::error!("ConnectionWriter::send_error_response failed: {:?}", err)
        }
    }

    /// Build the metadata attached to messages sent to request workers, so
    /// their responses can be routed back to this exact connection.
    fn make_connection_meta(&self, pending_scrape_id: Option<PendingScrapeId>) -> ConnectionMeta {
        ConnectionMeta {
            connection_id: self.connection_id,
            out_message_consumer_id: self.out_message_consumer_id,
            naive_peer_addr: self.peer_addr,
            converted_peer_ip: convert_ipv4_mapped_ipv6(self.peer_addr.ip()),
            pending_scrape_id,
        }
    }
}
/// Writing half of a connection: receives out-messages over the local
/// channel and writes them to the WebSocket, merging partial scrape
/// responses first.
struct ConnectionWriter {
    out_message_receiver: LocalReceiver<(ConnectionMeta, OutMessage)>,
    ws_out: SplitSink<WebSocketStream<TlsStream<TcpStream>>, tungstenite::Message>,
    // Scrape responses being assembled; shared with the reader half
    pending_scrape_slab: Rc<RefCell<Slab<PendingScrapeResponse>>>,
    peer_addr: SocketAddr,
}
impl ConnectionWriter {
    /// Receive out-messages for this connection and write them to the
    /// WebSocket. Partial scrape responses are accumulated in the pending
    /// scrape slab and only sent once responses from all involved request
    /// workers have arrived; all other messages are sent immediately.
    async fn run_out_message_loop(&mut self) -> anyhow::Result<()> {
        loop {
            let (meta, out_message) = self.out_message_receiver.recv().await.ok_or_else(|| {
                anyhow::anyhow!("ConnectionWriter couldn't receive message, sender is closed")
            })?;

            // NOTE(review): presumably guards against a reused slab key
            // routing a stale message to the wrong connection — confirm
            if meta.naive_peer_addr != self.peer_addr {
                return Err(anyhow::anyhow!("peer addresses didn't match"));
            }

            match out_message {
                OutMessage::ScrapeResponse(out_message) => {
                    let pending_scrape_id = meta
                        .pending_scrape_id
                        .expect("meta.pending_scrape_id not set");

                    // Merge this partial response; finished when the last
                    // expected worker response has been folded in
                    let finished = if let Some(pending) = Slab::get_mut(
                        &mut RefCell::borrow_mut(&self.pending_scrape_slab),
                        pending_scrape_id.0,
                    ) {
                        pending.stats.extend(out_message.files);
                        pending.pending_worker_out_messages -= 1;

                        pending.pending_worker_out_messages == 0
                    } else {
                        return Err(anyhow::anyhow!("pending scrape not found in slab"));
                    };

                    if finished {
                        // Scope the borrow so it is released before the
                        // await in send_out_message below
                        let out_message = {
                            let mut slab = RefCell::borrow_mut(&self.pending_scrape_slab);

                            let pending = slab.remove(pending_scrape_id.0);

                            slab.shrink_to_fit();

                            OutMessage::ScrapeResponse(ScrapeResponse {
                                action: ScrapeAction,
                                files: pending.stats,
                            })
                        };

                        self.send_out_message(&out_message).await?;
                    }
                }
                out_message => {
                    self.send_out_message(&out_message).await?;
                }
            };
        }
    }

    /// Serialize and send a single out-message, flushing immediately.
    async fn send_out_message(&mut self, out_message: &OutMessage) -> anyhow::Result<()> {
        futures::SinkExt::send(&mut self.ws_out, out_message.to_ws_message()).await?;
        futures::SinkExt::flush(&mut self.ws_out).await?;

        Ok(())
    }
}
/// Pick the request worker responsible for a given info hash.
///
/// Uses the first byte of the hash modulo the number of request workers,
/// so all messages concerning one torrent land on the same worker.
fn calculate_in_message_consumer_index(config: &Config, info_hash: InfoHash) -> usize {
    let first_byte = info_hash.0[0];

    usize::from(first_byte) % config.request_workers
}