big refactor

This commit is contained in:
Abdenasser 2024-11-12 23:19:53 +01:00
parent a5793eb887
commit 30c5962b26
7 changed files with 375 additions and 332 deletions

156
src-tauri/src/commands.rs Normal file
View File

@ -0,0 +1,156 @@
use crate::state::AppState;
use crate::system::collect_system_stats;
use crate::types::{ProcessInfo, ProcessStaticInfo, SystemStats};
use std::time::{SystemTime, UNIX_EPOCH};
use sysinfo::{PidExt, ProcessExt, ProcessStatus, SystemExt};
use tauri::State;
/// Tauri command: snapshot all processes plus system-wide stats.
///
/// Returns the process list and a `SystemStats` snapshot, or a `String`
/// describing which lock/step failed.
#[tauri::command]
pub async fn get_processes(
    state: State<'_, AppState>,
) -> Result<(Vec<ProcessInfo>, SystemStats), String> {
    // Capture wall-clock time once so every process run-time is computed
    // against the same instant.
    let current_time = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map_err(|e| format!("Failed to get system time: {}", e))?
        .as_secs();

    // Do everything that needs the system handle inside this scope, then
    // release the `sys` mutex before taking the process-cache mutex so the
    // two locks are never held simultaneously (the pre-refactor code scoped
    // the lock deliberately; this restores that behavior).
    let (processes_data, system_stats) = {
        let mut sys = state
            .sys
            .lock()
            .map_err(|e| format!("Failed to lock system state: {}", e))?;
        sys.refresh_all();
        sys.refresh_networks_list();
        sys.refresh_disks_list();

        let data = collect_process_data(&sys, current_time);
        let stats = collect_system_stats(&mut sys, &state)
            .map_err(|e| format!("Failed to collect system stats: {}", e))?;
        (data, stats)
    };

    let mut process_cache = state
        .process_cache
        .lock()
        .map_err(|e| format!("Failed to lock process cache: {}", e))?;
    let processes = build_process_info(processes_data, &mut process_cache);

    Ok((processes, system_stats))
}
/// Tauri command: attempt to terminate the process identified by `pid`.
///
/// Returns `Ok(true)` when a kill signal was delivered, `Ok(false)` when no
/// process with that PID exists, and `Err` when the system mutex is poisoned.
#[tauri::command]
pub async fn kill_process(pid: u32, state: State<'_, AppState>) -> Result<bool, String> {
    let sys = state
        .sys
        .lock()
        .map_err(|e| format!("Failed to lock system state for process termination: {}", e))?;
    match sys.process(sysinfo::Pid::from(pid as usize)) {
        Some(target) => Ok(target.kill()),
        None => Ok(false),
    }
}
// Helper functions

/// Snapshot every process known to `sys` into plain `ProcessData` values,
/// so all `sysinfo` reads happen while the caller still holds the lock.
fn collect_process_data(sys: &sysinfo::System, current_time: u64) -> Vec<ProcessData> {
    let procs = sys.processes();
    let mut snapshots = Vec::with_capacity(procs.len());
    for (pid, process) in procs {
        let started = process.start_time();
        // A start time of 0 means the kernel didn't report one; report a
        // zero run time rather than `current_time` itself.
        let running_for = if started > 0 {
            current_time.saturating_sub(started)
        } else {
            0
        };
        let disk = process.disk_usage();
        snapshots.push(ProcessData {
            pid: pid.as_u32(),
            name: process.name().to_string(),
            cmd: process.cmd().to_vec(),
            user_id: process.user_id().map(|uid| uid.to_string()),
            cpu_usage: process.cpu_usage(),
            memory: process.memory(),
            status: process.status(),
            ppid: process.parent().map(|p| p.as_u32()),
            environ: process.environ().to_vec(),
            root: process.root().to_string_lossy().into_owned(),
            virtual_memory: process.virtual_memory(),
            start_time: started,
            run_time: running_for,
            disk_read: disk.read_bytes,
            disk_written: disk.written_bytes,
            session_id: process.session_id().map(|id| id.as_u32()),
        });
    }
    snapshots
}
// Helper struct for intermediate process data
/// Plain-data snapshot of one process, captured by `collect_process_data`
/// while the `sysinfo` lock is held, then consumed lock-free by
/// `build_process_info`.
struct ProcessData {
pid: u32,
name: String,
// Full argv; joined with spaces when cached as `command`.
cmd: Vec<String>,
// Stringified user id; `None` when sysinfo reports no owner.
user_id: Option<String>,
cpu_usage: f32,
memory: u64,
status: ProcessStatus,
// Parent PID; `None` when the process has no visible parent.
ppid: Option<u32>,
environ: Vec<String>,
root: String,
virtual_memory: u64,
// Process start time as reported by sysinfo (0 = unknown).
start_time: u64,
// Seconds between `start_time` and the snapshot time (0 when unknown).
run_time: u64,
disk_read: u64,
disk_written: u64,
session_id: Option<u32>,
}
// Helper function to build process info
/// Convert raw `ProcessData` snapshots into serializable `ProcessInfo`
/// records, caching the rarely-changing name/command/user per PID.
fn build_process_info(
    processes: Vec<ProcessData>,
    process_cache: &mut std::collections::HashMap<u32, ProcessStaticInfo>,
) -> Vec<ProcessInfo> {
    processes
        .into_iter()
        .map(|data| {
            // Update or get from cache. Unlike a plain `or_insert_with`, an
            // existing entry is refreshed when its name no longer matches:
            // otherwise a recycled PID would keep reporting the previous
            // process's name, command, and user forever.
            let cached_info = process_cache
                .entry(data.pid)
                .and_modify(|cached| {
                    if cached.name != data.name {
                        *cached = ProcessStaticInfo {
                            name: data.name.clone(),
                            command: data.cmd.join(" "),
                            user: data.user_id.clone().unwrap_or_else(|| "-".to_string()),
                        };
                    }
                })
                .or_insert_with(|| ProcessStaticInfo {
                    name: data.name.clone(),
                    command: data.cmd.join(" "),
                    user: data.user_id.clone().unwrap_or_else(|| "-".to_string()),
                });
            // Collapse the platform-specific status enum into a small set of
            // display strings the frontend understands.
            let status_str = match data.status {
                ProcessStatus::Run => "Running",
                ProcessStatus::Sleep => "Sleeping",
                ProcessStatus::Idle => "Idle",
                _ => "Unknown",
            };
            ProcessInfo {
                pid: data.pid,
                ppid: data.ppid.unwrap_or(0),
                name: cached_info.name.clone(),
                cpu_usage: data.cpu_usage,
                memory_usage: data.memory,
                status: status_str.to_string(),
                user: cached_info.user.clone(),
                command: cached_info.command.clone(),
                threads: None,
                environ: data.environ,
                root: data.root,
                virtual_memory: data.virtual_memory,
                start_time: data.start_time,
                run_time: data.run_time,
                disk_usage: (data.disk_read, data.disk_written),
                session_id: data.session_id,
            }
        })
        .collect()
}

View File

@ -1,326 +1,14 @@
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
use std::collections::HashMap;
use std::sync::Mutex;
use std::time::{Instant, SystemTime, UNIX_EPOCH};
use sysinfo::{
CpuExt, Disk, DiskExt, NetworkExt, NetworksExt, PidExt, ProcessExt, ProcessStatus, System,
SystemExt,
};
use tauri::{Manager, State};
#[cfg(target_os = "windows")]
use window_vibrancy::apply_acrylic;
#[cfg(target_os = "macos")]
use window_vibrancy::{apply_vibrancy, NSVisualEffectMaterial, NSVisualEffectState};
mod commands;
mod state;
mod system;
mod types;
mod window;
// NOTE(review): pre-refactor version, replaced by src-tauri/src/state.rs in
// this commit.
/// Shared application state; each field is independently mutex-guarded.
struct AppState {
sys: Mutex<System>,
process_cache: Mutex<HashMap<u32, ProcessStaticInfo>>,
// (last sample instant, total rx bytes, total tx bytes) — baseline for
// computing per-second network rates.
last_network_update: Mutex<(Instant, u64, u64)>,
}
impl AppState {
/// Refresh the system once and record current network totals as the
/// baseline for later rate calculations.
pub fn new() -> Self {
let mut sys = System::new();
sys.refresh_all();
// Initialize network stats
let initial_rx = sys
.networks()
.iter()
.map(|(_, data)| data.total_received())
.sum();
let initial_tx = sys
.networks()
.iter()
.map(|(_, data)| data.total_transmitted())
.sum();
Self {
sys: Mutex::new(sys),
process_cache: Mutex::new(HashMap::new()),
last_network_update: Mutex::new((Instant::now(), initial_rx, initial_tx)),
}
}
}
// NOTE(review): pre-refactor version, moved to src-tauri/src/types.rs.
/// Per-PID process attributes that rarely change, cached across refreshes.
#[derive(Clone)]
struct ProcessStaticInfo {
name: String,
command: String,
user: String,
}
// NOTE(review): pre-refactor version, moved to src-tauri/src/types.rs.
/// Per-process snapshot serialized to the frontend.
#[derive(serde::Serialize)]
struct ProcessInfo {
pid: u32,
ppid: u32,
name: String,
cpu_usage: f32,
memory_usage: u64,
status: String,
user: String,
command: String,
// Not populated anywhere in this file (always `None`).
threads: Option<u32>,
environ: Vec<String>,
root: String,
virtual_memory: u64,
start_time: u64,
run_time: u64,
disk_usage: (u64, u64), // (read_bytes, written_bytes)
session_id: Option<u32>,
}
// NOTE(review): pre-refactor version, moved to src-tauri/src/types.rs.
/// System-wide metrics snapshot serialized to the frontend.
#[derive(serde::Serialize)]
pub struct SystemStats {
// One usage value per CPU core.
pub cpu_usage: Vec<f32>,
pub memory_total: u64,
pub memory_used: u64,
pub memory_free: u64,
pub memory_cached: u64,
pub uptime: u64,
// 1 / 5 / 15 minute load averages.
pub load_avg: [f64; 3],
pub network_rx_bytes: u64,
pub network_tx_bytes: u64,
pub disk_total_bytes: u64,
pub disk_used_bytes: u64,
pub disk_free_bytes: u64,
}
// Assume MacOS or Linux
// NOTE(review): pre-refactor version, moved to src-tauri/src/system.rs.
/// Select the disks whose capacity counts toward the totals: only the
/// root ("/") mount on Unix-like systems.
#[cfg(not(target_os = "windows"))]
fn filter_disks(disks: &[Disk]) -> Vec<&sysinfo::Disk> {
disks
.iter()
.filter(|disk| {
// Filter for physical disks - typically those mounted at "/"
disk.mount_point() == std::path::Path::new("/")
})
.collect()
}
/// Windows variant: every reported disk counts.
#[cfg(target_os = "windows")]
fn filter_disks(disks: &[Disk]) -> Vec<&sysinfo::Disk> {
disks.iter().collect()
}
// NOTE(review): pre-refactor monolithic version; this commit splits it into
// commands.rs / system.rs helpers. Kept here as deleted diff context.
/// Tauri command: snapshot all processes plus system-wide stats.
#[tauri::command]
async fn get_processes(
state: State<'_, AppState>,
) -> Result<(Vec<ProcessInfo>, SystemStats), String> {
let processes_data;
let system_stats;
// Get current time once for all calculations
let current_time = SystemTime::now()
.duration_since(UNIX_EPOCH)
.map_err(|e| e.to_string())?
.as_secs();
// Scope for system lock
{
let mut sys = state
.sys
.lock()
.map_err(|_| "Failed to lock system state")?;
sys.refresh_all();
sys.refresh_networks_list();
sys.refresh_disks_list();
// Collect all the process data we need while holding sys lock
// (16-element tuples per process; the refactor replaces these with the
// named `ProcessData` struct).
processes_data = sys
.processes()
.iter()
.map(|(pid, process)| {
let start_time = process.start_time();
let run_time = if start_time > 0 {
current_time.saturating_sub(start_time)
} else {
0
};
(
pid.as_u32(),
process.name().to_string(),
process.cmd().to_vec(),
process.user_id().map(|uid| uid.to_string()),
process.cpu_usage(),
process.memory(),
process.status(),
process.parent().map(|p| p.as_u32()),
process.environ().to_vec(),
process.root().to_string_lossy().into_owned(),
process.virtual_memory(),
start_time,
run_time, // Use calculated run_time
process.disk_usage().read_bytes,
process.disk_usage().written_bytes,
process.session_id().map(|id| id.as_u32()),
)
})
.collect::<Vec<_>>();
// Calculate total network I/O
let mut last_update = state
.last_network_update
.lock()
.map_err(|_| "Failed to lock network state")?;
let elapsed = last_update.0.elapsed().as_secs_f64();
// Shadows the u64 `current_time` above with an `Instant` — confusing
// but intentional: this one timestamps the network sample.
let current_time = Instant::now();
let current_rx: u64 = sys
.networks()
.iter()
.map(|(_, data)| data.total_received())
.sum();
let current_tx: u64 = sys
.networks()
.iter()
.map(|(_, data)| data.total_transmitted())
.sum();
// NOTE(review): `current_rx - last_update.1` can underflow (debug-build
// panic) if interface counters go backwards after refresh_networks_list.
let network_stats = (
((current_rx - last_update.1) as f64 / elapsed) as u64,
((current_tx - last_update.2) as f64 / elapsed) as u64,
);
*last_update = (current_time, current_rx, current_tx);
// Calculate total disk usage
let disk_stats = filter_disks(&sys.disks())
.iter()
.fold((0, 0, 0), |acc, disk| {
(
acc.0 + disk.total_space(),
acc.1 + disk.total_space() - disk.available_space(),
acc.2 + disk.available_space(),
)
});
system_stats = SystemStats {
cpu_usage: sys.cpus().iter().map(|cpu| cpu.cpu_usage()).collect(),
memory_total: sys.total_memory(),
memory_used: sys.used_memory(),
memory_free: sys.total_memory() - sys.used_memory(),
// NOTE(review): total - (used + (total - used)) is always 0.
memory_cached: sys.total_memory()
- (sys.used_memory() + (sys.total_memory() - sys.used_memory())),
uptime: sys.uptime(),
load_avg: [
sys.load_average().one,
sys.load_average().five,
sys.load_average().fifteen,
],
network_rx_bytes: network_stats.0,
network_tx_bytes: network_stats.1,
disk_total_bytes: disk_stats.0,
disk_used_bytes: disk_stats.1,
disk_free_bytes: disk_stats.2,
};
} // sys lock is automatically dropped here
// Now lock the process cache
let mut process_cache = state
.process_cache
.lock()
.map_err(|_| "Failed to lock process cache")?;
// Build the process info list
let processes = processes_data
.into_iter()
.map(
|(
pid,
name,
cmd,
user_id,
cpu_usage,
memory,
status,
ppid,
environ,
root,
virtual_memory,
start_time,
run_time,
disk_read,
disk_written,
session_id,
)| {
// Insert-once cache: a recycled PID keeps stale name/command/user.
let static_info = process_cache
.entry(pid)
.or_insert_with(|| ProcessStaticInfo {
name: name.clone(),
command: cmd.join(" "),
user: user_id.unwrap_or_else(|| "-".to_string()),
});
let status_str = match status {
ProcessStatus::Run => "Running",
ProcessStatus::Sleep => "Sleeping",
ProcessStatus::Idle => "Idle",
_ => "Unknown",
};
ProcessInfo {
pid,
ppid: ppid.unwrap_or(0),
name: static_info.name.clone(),
cpu_usage,
memory_usage: memory,
status: status_str.to_string(),
user: static_info.user.clone(),
command: static_info.command.clone(),
threads: None,
environ,
root,
virtual_memory,
start_time,
run_time,
disk_usage: (disk_read, disk_written),
session_id,
}
},
)
.collect();
Ok((processes, system_stats))
}
// NOTE(review): pre-refactor version, moved to src-tauri/src/commands.rs.
/// Tauri command: kill the process with the given PID; `Ok(false)` when the
/// PID does not exist.
#[tauri::command]
async fn kill_process(pid: u32, state: State<'_, AppState>) -> Result<bool, String> {
let sys = state
.sys
.lock()
.map_err(|_| "Failed to lock system state")?;
if let Some(process) = sys.process(sysinfo::Pid::from(pid as usize)) {
Ok(process.kill())
} else {
Ok(false)
}
}
#[cfg(target_os = "windows")]
fn setup_window_effects(window: &tauri::WebviewWindow) -> Result<(), Box<dyn std::error::Error>> {
apply_acrylic(window, Some((0, 0, 25, 125)))?;
Ok(())
}
#[cfg(target_os = "macos")]
fn setup_window_effects(window: &tauri::WebviewWindow) -> Result<(), Box<dyn std::error::Error>> {
apply_vibrancy(
window,
NSVisualEffectMaterial::HudWindow,
Some(NSVisualEffectState::Active),
None,
)?;
Ok(())
}
#[cfg(not(any(target_os = "windows", target_os = "macos")))]
fn setup_window_effects(_window: &tauri::WebviewWindow) -> Result<(), Box<dyn std::error::Error>> {
// No-op for other platforms
Ok(())
}
use state::AppState;
use tauri::Manager;
use window::setup_window_effects;
fn main() {
tauri::Builder::default()
@ -332,7 +20,10 @@ fn main() {
.plugin(tauri_plugin_shell::init())
.plugin(tauri_plugin_os::init())
.manage(AppState::new())
.invoke_handler(tauri::generate_handler![get_processes, kill_process])
.invoke_handler(tauri::generate_handler![
commands::get_processes,
commands::kill_process
])
.run(tauri::generate_context!())
.expect("error while running tauri application");
}

35
src-tauri/src/state.rs Normal file
View File

@ -0,0 +1,35 @@
use crate::types::ProcessStaticInfo;
use std::collections::HashMap;
use std::sync::Mutex;
use std::time::Instant;
use sysinfo::{NetworkExt, NetworksExt, System, SystemExt};
/// Tauri-managed shared state; each field is guarded by its own mutex so
/// commands can lock only what they need.
pub struct AppState {
pub sys: Mutex<System>,
// Per-PID cache of rarely-changing process info.
pub process_cache: Mutex<HashMap<u32, ProcessStaticInfo>>,
// (last sample instant, total rx bytes, total tx bytes) — baseline used
// to turn cumulative network counters into per-second rates.
pub last_network_update: Mutex<(Instant, u64, u64)>,
}
impl AppState {
    /// Build the initial state: refresh the system once, then record the
    /// current cumulative network totals as the baseline for later
    /// rate calculations.
    pub fn new() -> Self {
        let mut system = System::new();
        system.refresh_all();
        // Accumulate both totals in a single pass over the interfaces.
        let (rx_total, tx_total): (u64, u64) = system
            .networks()
            .iter()
            .fold((0, 0), |(rx, tx), (_, data)| {
                (rx + data.total_received(), tx + data.total_transmitted())
            });
        Self {
            sys: Mutex::new(system),
            process_cache: Mutex::new(HashMap::new()),
            last_network_update: Mutex::new((Instant::now(), rx_total, tx_total)),
        }
    }
}

93
src-tauri/src/system.rs Normal file
View File

@ -0,0 +1,93 @@
use crate::state::AppState;
use crate::types::SystemStats;
use std::path::Path;
use std::time::Instant;
use sysinfo::{CpuExt, Disk, DiskExt, NetworkExt, NetworksExt, SystemExt};
#[cfg(not(target_os = "windows"))]
pub fn filter_disks(disks: &[Disk]) -> Vec<&sysinfo::Disk> {
disks
.iter()
.filter(|disk| {
// Filter for physical disks - typically those mounted at "/"
disk.mount_point() == Path::new("/")
})
.collect()
}
#[cfg(target_os = "windows")]
pub fn filter_disks(disks: &[Disk]) -> Vec<&sysinfo::Disk> {
disks.iter().collect()
}
/// Compute per-second network throughput since the previous sample, then
/// advance `last_update` to the current totals.
///
/// Returns `(rx_bytes_per_sec, tx_bytes_per_sec)`.
fn calculate_network_stats(
    sys: &sysinfo::System,
    last_update: &mut (Instant, u64, u64),
    elapsed: f64,
) -> (u64, u64) {
    let current_rx: u64 = sys
        .networks()
        .iter()
        .map(|(_, data)| data.total_received())
        .sum();
    let current_tx: u64 = sys
        .networks()
        .iter()
        .map(|(_, data)| data.total_transmitted())
        .sum();
    // The interface list is re-enumerated on each refresh, so cumulative
    // totals can go *backwards* (e.g. an interface disappears); the old
    // `current_rx - last_update.1` underflowed and panicked in debug builds.
    // Also guard against a ~zero elapsed interval, which produced garbage
    // rates via division by ~0.
    let (rx_rate, tx_rate) = if elapsed > 0.0 {
        (
            (current_rx.saturating_sub(last_update.1) as f64 / elapsed) as u64,
            (current_tx.saturating_sub(last_update.2) as f64 / elapsed) as u64,
        )
    } else {
        (0, 0)
    };
    *last_update = (Instant::now(), current_rx, current_tx);
    (rx_rate, tx_rate)
}
/// Sum `(total, used, free)` space in bytes across the filtered disks.
fn calculate_disk_stats(sys: &sysinfo::System) -> (u64, u64, u64) {
    filter_disks(sys.disks())
        .iter()
        .fold((0u64, 0u64, 0u64), |(total, used, free), disk| {
            let capacity = disk.total_space();
            let available = disk.available_space();
            (
                total + capacity,
                used + capacity - available,
                free + available,
            )
        })
}
/// Gather a full `SystemStats` snapshot from an already-refreshed `sys`.
///
/// Locks `state.last_network_update` to turn cumulative network counters
/// into per-second rates; errors only when that mutex is poisoned.
pub fn collect_system_stats(
    sys: &mut sysinfo::System,
    state: &AppState,
) -> Result<SystemStats, String> {
    let mut last_update = state
        .last_network_update
        .lock()
        .map_err(|e| format!("Failed to lock network state: {}", e))?;
    let elapsed = last_update.0.elapsed().as_secs_f64();
    let (network_rx, network_tx) = calculate_network_stats(sys, &mut last_update, elapsed);
    let (disk_total, disk_used, disk_free) = calculate_disk_stats(sys);

    // Read each metric once instead of re-querying per field.
    let total_memory = sys.total_memory();
    let used_memory = sys.used_memory();
    let load = sys.load_average();

    Ok(SystemStats {
        cpu_usage: sys.cpus().iter().map(|cpu| cpu.cpu_usage()).collect(),
        memory_total: total_memory,
        memory_used: used_memory,
        memory_free: total_memory.saturating_sub(used_memory),
        // BUG FIX: the old expression `total - (used + (total - used))`
        // simplifies to 0, so memory_cached was always reported as zero.
        // Cache/buffer memory is what the OS can reclaim beyond truly free
        // memory: available - free.
        memory_cached: sys.available_memory().saturating_sub(sys.free_memory()),
        uptime: sys.uptime(),
        load_avg: [load.one, load.five, load.fifteen],
        network_rx_bytes: network_rx,
        network_tx_bytes: network_tx,
        disk_total_bytes: disk_total,
        disk_used_bytes: disk_used,
        disk_free_bytes: disk_free,
    })
}

44
src-tauri/src/types.rs Normal file
View File

@ -0,0 +1,44 @@
use serde::Serialize;
/// Process attributes that rarely change over a process lifetime, cached
/// per PID (see the process cache in `AppState`) to avoid re-deriving them
/// on every refresh.
#[derive(Clone)]
pub struct ProcessStaticInfo {
pub name: String,
// argv joined with single spaces.
pub command: String,
// Stringified user id, or "-" when unknown.
pub user: String,
}
/// Per-process snapshot serialized to the frontend.
///
/// Field names are part of the wire format consumed by the UI — do not
/// rename without updating the frontend.
#[derive(Serialize)]
pub struct ProcessInfo {
pub pid: u32,
// Parent PID; 0 when no parent is known.
pub ppid: u32,
pub name: String,
pub cpu_usage: f32,
pub memory_usage: u64,
// One of "Running" / "Sleeping" / "Idle" / "Unknown".
pub status: String,
pub user: String,
pub command: String,
// Not populated by the current collectors (always `None`).
pub threads: Option<u32>,
pub environ: Vec<String>,
pub root: String,
pub virtual_memory: u64,
pub start_time: u64,
// Seconds since start_time at snapshot time; 0 when start time unknown.
pub run_time: u64,
// (read_bytes, written_bytes)
pub disk_usage: (u64, u64),
pub session_id: Option<u32>,
}
/// System-wide metrics snapshot serialized to the frontend.
///
/// Field names are part of the wire format consumed by the UI — do not
/// rename without updating the frontend.
#[derive(Serialize)]
pub struct SystemStats {
// One usage value per CPU core.
pub cpu_usage: Vec<f32>,
pub memory_total: u64,
pub memory_used: u64,
pub memory_free: u64,
pub memory_cached: u64,
pub uptime: u64,
// 1 / 5 / 15 minute load averages.
pub load_avg: [f64; 3],
// Bytes per second since the previous sample (see calculate_network_stats).
pub network_rx_bytes: u64,
pub network_tx_bytes: u64,
pub disk_total_bytes: u64,
pub disk_used_bytes: u64,
pub disk_free_bytes: u64,
}

32
src-tauri/src/window.rs Normal file
View File

@ -0,0 +1,32 @@
#[cfg(target_os = "windows")]
use window_vibrancy::apply_acrylic;
#[cfg(target_os = "macos")]
use window_vibrancy::{apply_vibrancy, NSVisualEffectMaterial, NSVisualEffectState};
#[cfg(target_os = "windows")]
pub fn setup_window_effects(
window: &tauri::WebviewWindow,
) -> Result<(), Box<dyn std::error::Error>> {
apply_acrylic(window, Some((0, 0, 25, 125)))?;
Ok(())
}
#[cfg(target_os = "macos")]
pub fn setup_window_effects(
window: &tauri::WebviewWindow,
) -> Result<(), Box<dyn std::error::Error>> {
apply_vibrancy(
window,
NSVisualEffectMaterial::HudWindow,
Some(NSVisualEffectState::Active),
None,
)?;
Ok(())
}
#[cfg(not(any(target_os = "windows", target_os = "macos")))]
pub fn setup_window_effects(
_window: &tauri::WebviewWindow,
) -> Result<(), Box<dyn std::error::Error>> {
Ok(())
}

View File

@ -255,18 +255,11 @@
selectedProcessPid = null;
}
let minLoadingTimer: ReturnType<typeof setTimeout>;
const MIN_LOADING_TIME = 2000; // Show loading screen for at least 2 seconds
onMount(async () => {
const loadingPromise = Promise.all([getProcesses()]);
const timerPromise = new Promise((resolve) => {
minLoadingTimer = setTimeout(resolve, MIN_LOADING_TIME);
});
try {
// Wait for both the data to load AND the minimum time to pass
await Promise.all([loadingPromise, timerPromise]);
await getProcesses();
} catch (error) {
console.error("Failed to load processes:", error);
} finally {
isLoading = false;
}
@ -277,7 +270,6 @@
onDestroy(() => {
if (intervalId) clearInterval(intervalId);
if (minLoadingTimer) clearTimeout(minLoadingTimer);
});
</script>