formatter + implement status history

csehviktor
2025-07-15 01:08:51 +02:00
parent 61fb1b1583
commit 6a311246f6
16 changed files with 245 additions and 110 deletions

View File

@@ -1,5 +1,5 @@
use common::StatusMessage;
use tokio::sync::{mpsc::Sender, Mutex};
use tokio::sync::{Mutex, mpsc::Sender};
pub struct ClientManager {
clients: Mutex<Vec<Sender<StatusMessage>>>,
@@ -20,8 +20,6 @@ impl ClientManager {
pub async fn broadcast(&self, message: &StatusMessage) {
let mut clients = self.clients.lock().await;
clients.retain(|client| {
client.try_send(message.clone()).is_ok()
});
clients.retain(|client| client.try_send(message.clone()).is_ok());
}
}

View File

@@ -1,8 +1,8 @@
use rumqttd::{Broker, Config};
use std::sync::Arc;
use crate::{bridge::ClientManager, storage::StorageRepositoryImpl};
use super::subscriber::MqttSubscriber;
use crate::{bridge::ClientManager, storage::StorageRepositoryImpl};
pub struct MqttBroker {
broker: &'static mut Broker,

View File

@@ -1,5 +1,5 @@
use rumqttd::{local::LinkRx, Broker, Notification};
use common::{StatusMessage, MQTT_TOPIC};
use common::{MQTT_TOPIC, StatusMessage};
use rumqttd::{Broker, Notification, local::LinkRx};
use std::sync::Arc;
use crate::{bridge::ClientManager, storage::StorageRepository};
@@ -11,7 +11,11 @@ pub struct MqttSubscriber {
}
impl MqttSubscriber {
pub fn new(broker: &Broker, clients: Arc<ClientManager>, storage: Arc<dyn StorageRepository>) -> Self {
pub fn new(
broker: &Broker,
clients: Arc<ClientManager>,
storage: Arc<dyn StorageRepository>,
) -> Self {
let (mut link_tx, link_rx) = broker.link("internal-subscriber").unwrap();
link_tx.subscribe(MQTT_TOPIC).unwrap();
@@ -32,6 +36,10 @@ impl MqttSubscriber {
if let Err(e) = self.storage.record_uptime(&payload.agent).await {
eprintln!("failed to record uptime for {}: {}", &payload.agent, e);
}
if let Err(e) = self.storage.record_message(&payload).await {
eprintln!("failed to record message for {}: {}", &payload.agent, e);
}
}
}

View File

@@ -1,5 +1,5 @@
use serde::Deserialize;
use rumqttd::Config;
use serde::Deserialize;
const CONFIG_PATH: &str = if cfg!(debug_assertions) {
"server/config.toml"

View File

@@ -1,21 +1,24 @@
use storage::{StorageRepositoryImpl, StorageStrategy};
use broker::manager::MqttBroker;
use config::load_config;
use server::Server;
use std::sync::Arc;
use storage::{StorageRepositoryImpl, StorageStrategy};
pub mod broker;
pub mod bridge;
pub mod broker;
pub mod config;
pub mod storage;
pub mod server;
pub mod storage;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let cfg = load_config()?;
let storage = Arc::new(if cfg.storage.sqlite {
StorageRepositoryImpl::new(StorageStrategy::SQLite(format!("{}agents.db", cfg.storage.db_path)))
StorageRepositoryImpl::new(StorageStrategy::SQLite(format!(
"{}agents.db",
cfg.storage.db_path
)))
} else {
StorageRepositoryImpl::new(StorageStrategy::InMemory)
});

View File

@@ -1,5 +1,5 @@
use warp::{reply::json, Filter, Reply, Rejection};
use std::sync::Arc;
use warp::{Filter, Rejection, Reply, reply::json};
use crate::storage::StorageRepository;
@@ -13,10 +13,20 @@ impl HttpRoutes {
}
pub fn routes(self: Arc<Self>) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone {
warp::path!("agents")
let agents_storage = self.storage.clone();
let history_storage = self.storage.clone();
let agents_route = warp::path!("agents")
.and(warp::get())
.and(warp::any().map(move || self.storage.clone()))
.and_then(Self::get_agents)
.and(warp::any().map(move || agents_storage.clone()))
.and_then(Self::get_agents);
let history_route = warp::path!("history" / String)
.and(warp::get())
.and(warp::any().map(move || history_storage.clone()))
.and_then(Self::get_history);
agents_route.or(history_route)
}
async fn get_agents(storage: Arc<dyn StorageRepository>) -> Result<impl Reply, Rejection> {
@@ -24,4 +34,13 @@ impl HttpRoutes {
Ok(json(&agents))
}
async fn get_history(
agent: String,
storage: Arc<dyn StorageRepository>,
) -> Result<impl Reply, Rejection> {
let history = storage.get_history(&agent).await.unwrap();
Ok(json(&history))
}
}
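To sanity-check the new /history/{agent} route, a warp test filter can drive it end to end. This is a minimal sketch, not part of the commit: it assumes HttpRoutes::new takes the Arc<dyn StorageRepository> that server.rs passes in, reuses the in-memory repository from this commit, sits in the same module as HttpRoutes (Arc and StorageRepository are already imported there), and uses an illustrative agent id:

use crate::storage::memory::InMemoryRepository;

#[tokio::test]
async fn history_route_returns_empty_json_for_unknown_agent() {
    // Hypothetical test; exercises the routes() filter defined above.
    let storage: Arc<dyn StorageRepository> = Arc::new(InMemoryRepository::new());
    let routes = Arc::new(HttpRoutes::new(storage)).routes();

    let resp = warp::test::request()
        .method("GET")
        .path("/history/agent-1")
        .reply(&routes)
        .await;

    // An agent with no recorded messages yields an empty Vec, which serializes to "[]".
    assert_eq!(resp.status(), 200);
    assert_eq!(resp.body().as_ref(), b"[]");
}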

View File

@@ -1,7 +1,7 @@
use http::HttpRoutes;
use std::sync::Arc;
use warp::Filter;
use websocket::WebsocketRoutes;
use std::sync::Arc;
use crate::{bridge::ClientManager, storage::StorageRepositoryImpl};
@@ -15,19 +15,14 @@ pub struct Server {
impl Server {
pub fn new(clients: Arc<ClientManager>, storage: Arc<StorageRepositoryImpl>) -> Self {
Self {
clients,
storage,
}
Self { clients, storage }
}
pub async fn serve(&self) {
let http_routes = Arc::new(HttpRoutes::new(self.storage.inner()));
let ws_routes = Arc::new(WebsocketRoutes::new(self.clients.clone()));
let cors = warp::cors()
.allow_any_origin()
.allow_methods(vec!["GET"]);
let cors = warp::cors().allow_any_origin().allow_methods(vec!["GET"]);
let routes = http_routes.routes().with(cors).or(ws_routes.routes());

View File

@@ -1,8 +1,11 @@
use common::StatusMessage;
use warp::{filters::ws::{Message, WebSocket}, Filter, Reply, Rejection};
use futures_util::{StreamExt, SinkExt};
use tokio::sync::mpsc::channel;
use futures_util::{SinkExt, StreamExt};
use std::sync::Arc;
use tokio::sync::mpsc::channel;
use warp::{
Filter, Rejection, Reply,
filters::ws::{Message, WebSocket},
};
use crate::bridge::ClientManager;
@@ -16,17 +19,24 @@ impl WebsocketRoutes {
}
pub fn routes(self: Arc<Self>) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone {
warp::path("ws")
.and(warp::path::param::<String>())
warp::path!("ws" / String)
.and(warp::get())
.and(warp::ws())
.and(warp::any().map(move || self.clients.clone()))
.map(|agent: String, websocket: warp::ws::Ws, clients: Arc<ClientManager>| {
websocket.on_upgrade(move |websocket| Self::handle_ws_connection(agent, websocket, clients))
})
.map(
|agent: String, websocket: warp::ws::Ws, clients: Arc<ClientManager>| {
websocket.on_upgrade(move |websocket| {
Self::handle_ws_connection(agent, websocket, clients)
})
},
)
}
async fn handle_ws_connection(agent: String, websocket: WebSocket, clients: Arc<ClientManager>) {
async fn handle_ws_connection(
agent: String,
websocket: WebSocket,
clients: Arc<ClientManager>,
) {
let (mut ws_tx, _) = websocket.split();
let (tx, mut rx) = channel::<StatusMessage>(100);
@@ -44,5 +54,4 @@ impl WebsocketRoutes {
}
});
}
}

View File

@@ -1,29 +1,42 @@
use std::collections::HashMap;
use chrono::Utc;
use tokio::sync::Mutex;
use async_trait::async_trait;
use chrono::Utc;
use common::StatusMessage;
use std::collections::HashMap;
use tokio::sync::Mutex;
use super::{StorageRepository, UptimeMessage, UptimeStorageModel};
pub struct InMemoryRepository {
agents: Mutex<HashMap<String, UptimeStorageModel>>
agents: Mutex<HashMap<String, UptimeStorageModel>>,
messages: Mutex<HashMap<String, Vec<StatusMessage>>>,
}
impl InMemoryRepository {
pub fn new() -> Self {
Self {
agents: Default::default(),
messages: Default::default(),
}
}
}
#[async_trait]
impl StorageRepository for InMemoryRepository {
async fn record_message(&self, message: &StatusMessage) -> anyhow::Result<()> {
let mut messages = self.messages.lock().await;
let agent_messages = messages.entry(message.agent.clone()).or_default();
agent_messages.push(message.clone());
Ok(())
}
async fn record_uptime(&self, agent: &str) -> anyhow::Result<()> {
let mut agents = self.agents.lock().await;
let now = Utc::now();
agents.entry(agent.to_string())
agents
.entry(agent.to_string())
.and_modify(|a| {
a.last_seen = now;
a.message_count += 1;
@@ -38,18 +51,11 @@ impl StorageRepository for InMemoryRepository {
Ok(())
}
/*
async fn get_uptime(&self, agent: &str) -> anyhow::Result<Option<UptimeMessage>> {
let agents = self.agents.lock().await;
async fn get_history(&self, agent: &str) -> anyhow::Result<Vec<StatusMessage>> {
let messages = self.messages.lock().await;
match agents.get(agent) {
Some(data) => {
Ok(Some(data.clone().into()))
}
None => Ok(None),
}
Ok(messages.get(agent).cloned().unwrap_or_default())
}
*/
async fn get_agents(&self) -> anyhow::Result<Vec<UptimeMessage>> {
let agents = self.agents.lock().await;

View File

@@ -1,10 +1,11 @@
use serde::{Deserialize, Serialize};
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use common::{MQTT_SEND_INTERVAL, StatusMessage};
use memory::InMemoryRepository;
use rusqlite::Row;
use serde::{Deserialize, Serialize};
use sqlite::SQLiteRepository;
use std::sync::Arc;
use memory::InMemoryRepository;
use async_trait::async_trait;
use common::MQTT_SEND_INTERVAL;
pub mod memory;
pub mod sqlite;
@@ -39,9 +40,32 @@ impl Into<UptimeMessage> for UptimeStorageModel {
}
}
impl TryFrom<&Row<'_>> for UptimeStorageModel {
type Error = rusqlite::Error;
fn try_from(row: &Row) -> Result<Self, Self::Error> {
let first_seen: DateTime<Utc> = row.get::<_, String>(1)?.parse().map_err(|e| {
rusqlite::Error::FromSqlConversionFailure(1, rusqlite::types::Type::Text, Box::new(e))
})?;
let last_seen: DateTime<Utc> = row.get::<_, String>(2)?.parse().map_err(|e| {
rusqlite::Error::FromSqlConversionFailure(2, rusqlite::types::Type::Text, Box::new(e))
})?;
Ok(UptimeStorageModel {
id: row.get(0)?,
first_seen,
last_seen,
message_count: row.get(3)?, // column 3 in "SELECT id, first_seen, last_seen, message_count"
})
}
}
#[async_trait]
pub trait StorageRepository: Send + Sync {
async fn record_message(&self, message: &StatusMessage) -> anyhow::Result<()>;
async fn record_uptime(&self, agent: &str) -> anyhow::Result<()>;
async fn get_history(&self, agent: &str) -> anyhow::Result<Vec<StatusMessage>>;
async fn get_agents(&self) -> anyhow::Result<Vec<UptimeMessage>>;
}
@@ -51,7 +75,7 @@ pub enum StorageStrategy {
}
pub struct StorageRepositoryImpl {
inner: Arc<dyn StorageRepository>
inner: Arc<dyn StorageRepository>,
}
impl StorageRepositoryImpl {

View File

@@ -1,13 +1,15 @@
use anyhow::Ok;
use async_trait::async_trait;
use chrono::Utc;
use common::StatusMessage;
use rusqlite::Connection;
use tokio::sync::Mutex;
use std::path::Path;
use chrono::{DateTime, Utc};
use tokio::sync::Mutex;
use super::{StorageRepository, UptimeMessage, UptimeStorageModel};
pub struct SQLiteRepository {
conn: Mutex<Connection>
conn: Mutex<Connection>,
}
impl SQLiteRepository {
@@ -27,53 +29,101 @@ impl SQLiteRepository {
message_count INTEGER NOT NULL DEFAULT 0
) STRICT;
CREATE TABLE IF NOT EXISTS messages (
id INTEGER PRIMARY KEY AUTOINCREMENT,
agent_id TEXT NOT NULL,
message TEXT NOT NULL,
timestamp TEXT NOT NULL,
FOREIGN KEY (agent_id) REFERENCES agents(id) ON DELETE CASCADE
) STRICT;
CREATE INDEX IF NOT EXISTS idx_agents_id ON agents(id);
CREATE INDEX IF NOT EXISTS idx_agents_times ON agents(first_seen, last_seen);
"#
).unwrap();
CREATE INDEX IF NOT EXISTS idx_messages_agent_id ON messages(agent_id);
CREATE INDEX IF NOT EXISTS idx_messages_timestamp ON messages(timestamp);
"#,
)
.unwrap();
Self {
conn: Mutex::new(conn)
conn: Mutex::new(conn),
}
}
}
#[async_trait]
impl StorageRepository for SQLiteRepository {
async fn record_uptime(&self, agent: &str) -> anyhow::Result<()> {
async fn record_message(&self, message: &StatusMessage) -> anyhow::Result<()> {
let conn = self.conn.lock().await;
let now = Utc::now().to_rfc3339();
conn.execute(r#"
INSERT INTO agents (id, first_seen, last_seen, message_count)
VALUES (?1, ?2, ?2, 1)
ON CONFLICT (id) DO UPDATE SET
last_seen = excluded.last_seen,
message_count = message_count + 1;
"#, [agent, &now]
let payload_str = message.to_string().unwrap();
conn.execute(
r#"
INSERT INTO messages (agent_id, message, timestamp)
VALUES (?1, ?2, ?3)
"#,
[
&message.agent,
&payload_str,
&message.timestamp.to_rfc3339(),
],
)?;
Ok(())
}
async fn get_agents(&self) -> anyhow::Result<Vec<UptimeMessage>> {
async fn record_uptime(&self, agent: &str) -> anyhow::Result<()> {
let conn = self.conn.lock().await;
let mut stmt = conn.prepare("SELECT id, first_seen, last_seen, message_count FROM agents")?;
let now = Utc::now().to_rfc3339();
let result = stmt.query_map([], |row| {
let first_seen: DateTime<Utc> = row.get::<_, String>(1)?.parse().unwrap();
let last_seen: DateTime<Utc> = row.get::<_, String>(2)?.parse().unwrap();
conn.execute(
r#"
INSERT INTO agents (id, first_seen, last_seen, message_count)
VALUES (?1, ?2, ?2, 1)
ON CONFLICT (id) DO UPDATE SET
last_seen = excluded.last_seen,
message_count = message_count + 1;
"#,
[agent, &now],
)?;
Ok(UptimeStorageModel {
id: row.get(0)?,
first_seen,
last_seen,
message_count: row.get(3)?,
Ok(())
}
async fn get_history(&self, agent: &str) -> anyhow::Result<Vec<StatusMessage>> {
let conn = self.conn.lock().await;
let mut stmt =
conn.prepare("SELECT agent_id, message, timestamp FROM messages WHERE agent_id = ?")?;
let rows = stmt.query_map([agent], |row| {
// Propagate the column read error instead of unwrapping, and avoid shadowing `row`.
let payload: String = row.get::<_, String>(1)?;
StatusMessage::try_from(payload).map_err(|e| {
rusqlite::Error::FromSqlConversionFailure(
1,
rusqlite::types::Type::Text,
Box::new(e),
)
})
})?;
let models: Result<Vec<UptimeStorageModel>, _> = result.collect();
let models: Vec<StatusMessage> = rows.collect::<Result<Vec<StatusMessage>, _>>()?;
Ok(models?.into_iter().map(Into::into).collect())
Ok(models.into_iter().map(Into::into).collect())
}
async fn get_agents(&self) -> anyhow::Result<Vec<UptimeMessage>> {
let conn = self.conn.lock().await;
let mut stmt =
conn.prepare("SELECT id, first_seen, last_seen, message_count FROM agents")?;
let rows = stmt.query_map([], |row| UptimeStorageModel::try_from(row))?;
let models: Vec<UptimeStorageModel> =
rows.collect::<Result<Vec<UptimeStorageModel>, _>>()?;
Ok(models.into_iter().map(Into::into).collect())
}
}
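Taken together, record_uptime, record_message, get_agents, and get_history form the round trip this commit enables. A minimal usage sketch against the trait, assumed to live somewhere in the server crate: it uses the in-memory backend (whose constructor is shown above; the SQLite constructor's signature is not, so it is avoided), a made-up agent id, and skips record_message since StatusMessage's fields are not visible in this diff:

use std::sync::Arc;

use crate::storage::{StorageRepository, memory::InMemoryRepository};

#[tokio::test]
async fn uptime_and_history_round_trip() {
    // Hypothetical test, not part of this commit.
    let storage: Arc<dyn StorageRepository> = Arc::new(InMemoryRepository::new());

    // Two heartbeats from the same agent: the entry is created once, then
    // last_seen and message_count are updated in place.
    storage.record_uptime("agent-1").await.unwrap();
    storage.record_uptime("agent-1").await.unwrap();

    let agents = storage.get_agents().await.unwrap();
    assert_eq!(agents.len(), 1);

    // No StatusMessage has been recorded yet, so the history for this agent is empty.
    let history = storage.get_history("agent-1").await.unwrap();
    assert!(history.is_empty());
}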