structure
3   .idea/p2p-merkel.iml   (generated)
@@ -2,7 +2,8 @@
 <module type="EMPTY_MODULE" version="4">
   <component name="NewModuleRootManager">
     <content url="file://$MODULE_DIR$">
-      <sourceFolder url="file://$MODULE_DIR$/src" isTestSource="false" />
+      <sourceFolder url="file://$MODULE_DIR$/client-gui/src" isTestSource="false" />
+      <sourceFolder url="file://$MODULE_DIR$/client-network/src" isTestSource="false" />
       <excludeFolder url="file://$MODULE_DIR$/target" />
     </content>
     <orderEntry type="inheritedJdk" />
4   .idea/vcs.xml   (generated)
@@ -1,6 +1,8 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
   <component name="VcsDirectoryMappings">
-    <mapping directory="" vcs="Git" />
+    <mapping directory="$PROJECT_DIR$" vcs="Git" />
+    <mapping directory="$PROJECT_DIR$/client-gui" vcs="Git" />
+    <mapping directory="$PROJECT_DIR$/client-network" vcs="Git" />
   </component>
 </project>
4118   Cargo.lock   (generated)
File diff suppressed because it is too large.
15   Cargo.toml
@@ -1,9 +1,6 @@
-[package]
-name = "p2p-merkel"
-version = "0.1.0"
-edition = "2024"
-
-[dependencies]
-fltk = "1.5.22"
-sha2 = "0.10.9"
-tokio = { version = "1.48.0", features = ["rt", "rt-multi-thread", "macros"] }
+[workspace]
+members = [
+    "client-gui",
+    "client-network",
+]
+resolver = "2"
1   client-gui/.gitignore   (vendored, new file)
@@ -0,0 +1 @@
/target
13   client-gui/Cargo.toml   (new file)
@@ -0,0 +1,13 @@
[package]
name = "client-gui"
version = "0.1.0"
edition = "2024"

[dependencies]
crossbeam-channel = "0.5.15"
eframe = "0.33.2"
egui = "0.33.2"
tokio = { version = "1", features = ["macros", "rt-multi-thread"] }

client-network = { path = "../client-network" }
hex = "0.4.3"
BIN   client-gui/assets/icon.png   (new file)
Binary file not shown (8.4 KiB).
299   client-gui/src/gui_app.rs   (new file)
@@ -0,0 +1,299 @@
use client_network::{node_hash_to_hex_string, MerkleNode, NetworkCommand, NetworkEvent, NodeHash};
use crossbeam_channel::{Receiver, Sender};
use egui::{Align, Button, CentralPanel, CollapsingHeader, Context, Layout, ScrollArea, SidePanel, TopBottomPanel, Ui, ViewportCommand};
use std::collections::HashMap;

pub struct FileNode {
    pub name: String,
    pub is_dir: bool,
    pub hash_id: String, // The Merkle root or leaf hash (String)
    pub children: Option<Vec<FileNode>>,
}

fn build_file_node_recursively(
    hash: &NodeHash,
    storage: &HashMap<NodeHash, MerkleNode>,
    name: String,
) -> Option<FileNode> {
    let node = storage.get(hash)?;
    let hash_id = hex::encode(hash);

    match node {
        MerkleNode::Directory(dir_node) => {
            // Recurse through all entries to build children
            let children: Vec<FileNode> = dir_node.entries.iter().filter_map(|entry| {
                let filename_lossy = String::from_utf8_lossy(&entry.filename)
                    .trim_end_matches('\0')
                    .to_string();

                build_file_node_recursively(&entry.content_hash, storage, filename_lossy)
            }).collect();

            Some(FileNode {
                name,
                is_dir: true,
                hash_id,
                children: Some(children),
            })
        }
        MerkleNode::BigDirectory(big_dir_node) => {
            // In a real system, BigDirectory children would have names stored in an index.
            // Here, we generate dummy names to show recursion working.
            let children: Vec<FileNode> = big_dir_node.children_hashes.iter().filter_map(|child_hash| {
                let dummy_name = format!("chunk_group_{}", &hex::encode(child_hash)[..4]);
                build_file_node_recursively(child_hash, storage, dummy_name)
            }).collect();

            Some(FileNode {
                name,
                is_dir: true,
                hash_id,
                children: Some(children),
            })
        }
        // Chunk or Big nodes are files (leaves in the file tree)
        _ => Some(FileNode {
            name,
            is_dir: false,
            hash_id,
            children: None,
        }),
    }
}

pub fn convert_merkle_to_file_nodes(root_hash: NodeHash, storage: &HashMap<NodeHash, MerkleNode>) -> Option<FileNode> {
    let root_name = "/".to_string();
    build_file_node_recursively(&root_hash, storage, root_name)
}

// --- Main Application Struct ---
pub struct P2PClientApp {
    // Communication channels
    network_cmd_tx: Sender<NetworkCommand>,
    network_event_rx: Receiver<NetworkEvent>,

    // GUI State
    status_message: String,
    known_peers: Vec<String>,
    connect_address_input: String,

    peer_root_hash: HashMap<String, String>, // peer_id -> root_hash

    // Key: directory hash (hex String), Value: the FileNode subtree loaded for that directory
    loaded_tree_nodes: HashMap<String, FileNode>,

    // Which peer's tree we are currently displaying
    active_peer_id: Option<String>,
    active_root_hash: Option<String>,
}

impl P2PClientApp {
    pub fn new(cmd_tx: Sender<NetworkCommand>, event_rx: Receiver<NetworkEvent>) -> Self {
        let (root_hash, tree) = MerkleNode::generate_random_tree(5).expect("Couldn't generate tree");
        let mut peer_root_hash = HashMap::new();
        peer_root_hash.insert("bob".to_string(), "yoyoyoyo".to_string());

        let mut loaded_tree_nodes = HashMap::new();
        loaded_tree_nodes.insert(node_hash_to_hex_string(&root_hash), convert_merkle_to_file_nodes(root_hash, &tree).expect("Couldn't convert tree"));

        Self {
            network_cmd_tx: cmd_tx,
            network_event_rx: event_rx,
            status_message: "Client Initialized. Awaiting network status...".to_string(),
            known_peers: vec![
                "bob".to_string()
            ],
            connect_address_input: "127.0.0.1:8080".to_string(),
            peer_root_hash,
            loaded_tree_nodes,
            active_peer_id: None,
            active_root_hash: None,
        }
    }
}

// --- eframe::App Trait Implementation ---

impl eframe::App for P2PClientApp {
    fn update(&mut self, ctx: &Context, _frame: &mut eframe::Frame) {
        // 1. Process incoming Network Events
        // We poll the channel and update the GUI state for every event received.
        while let Ok(event) = self.network_event_rx.try_recv() {
            match event {
                NetworkEvent::PeerConnected(addr) => {
                    self.status_message = format!("✅ Peer connected: {}", addr);
                    if !self.known_peers.contains(&addr) {
                        self.known_peers.push(addr);
                    }
                }
                NetworkEvent::PeerListUpdated(peers) => {
                    self.known_peers = peers;
                }
                NetworkEvent::FileTreeReceived(_peer_id, _) => {
                    // self.loaded_tree_nodes.insert(_peer_id, tree);
                    self.status_message = "🔄 File tree updated successfully.".to_string();
                }
                NetworkEvent::FileTreeRootReceived(peer_id, root_hash) => {
                    self.status_message = format!("🔄 Received Merkle Root from {}: {}", peer_id, &root_hash[..8]);
                    self.peer_root_hash.insert(peer_id.clone(), root_hash.clone());

                    self.active_peer_id = Some(peer_id.clone());
                    self.active_root_hash = Some(root_hash.clone());

                    // Request the content of the root directory immediately
                    let _ = self.network_cmd_tx.send(NetworkCommand::RequestDirectoryContent(
                        peer_id,
                        root_hash,
                    ));
                }
                // Handle other events like Disconnect, Error, etc.
                _ => {}
            }
        }

        // 2. Menu Bar
        TopBottomPanel::top("top_panel").show(ctx, |ui| {
            egui::MenuBar::new().ui(ui, |ui| {
                ui.menu_button("File", |ui| {
                    if ui.button("Quit").clicked() {
                        // Use ViewportCommand to request a close
                        ctx.send_viewport_cmd(ViewportCommand::Close);
                    }
                });

                ui.menu_button("Network", |ui| {
                    ui.horizontal(|ui| {
                        ui.label("Connect to:");
                        ui.text_edit_singleline(&mut self.connect_address_input);
                        if ui.button("Connect").clicked() {
                            let addr = self.connect_address_input.clone();
                            let _ = self.network_cmd_tx.send(NetworkCommand::ConnectPeer(addr));
                            ui.close();
                        }
                    });
                });
            });
        });

        // 3. Right-sided Panel (Known Peers)
        SidePanel::right("right_panel").resizable(true).min_width(180.0).show(ctx, |ui| {
            ui.heading("🌐 Known Peers");
            ui.separator();
            ScrollArea::vertical().show(ui, |ui| {
                if self.known_peers.is_empty() {
                    ui.add_space(10.0);
                    ui.label("No active peers.");
                } else {
                    for peer in &self.known_peers {
                        let is_active = self.active_peer_id.as_ref().map_or(false, |id| id == peer);
                        let root_hash_str = self.peer_root_hash.get(peer)
                            .map(|h| format!("Root: {}", &h[..8]))
                            .unwrap_or_else(|| "Root: N/A".to_string());

                        if ui.selectable_label(is_active, format!("{} ({})", peer, root_hash_str)).clicked() {
                            // Switch to displaying this peer's tree
                            self.active_peer_id = Some(peer.clone());
                            if let Some(hash) = self.peer_root_hash.get(peer) {
                                self.active_root_hash = Some(hash.clone());

                                // Request root content if not loaded
                                if !self.loaded_tree_nodes.contains_key(hash) {
                                    let _ = self.network_cmd_tx.send(NetworkCommand::RequestDirectoryContent(
                                        peer.clone(),
                                        hash.clone(),
                                    ));
                                }
                            }
                        }
                    }
                }
            });
        });

        // 4. Central Panel (Filesystem Tree)
        CentralPanel::default().show(ctx, |ui| {
            ui.heading("📂 Decentralized File System");
            ui.separator();

            if let Some(root_hash) = &self.active_root_hash {
                if let Some(root_nodes) = self.loaded_tree_nodes.get(root_hash) {
                    ScrollArea::vertical().show(ui, |ui| {
                        // Start drawing the tree from the root hash
                        self.draw_file_tree(ui, root_nodes, 0);
                    });
                } else {
                    ui.label(format!("Loading root content for hash: {}", &root_hash[..8]));
                }
            } else {
                ui.label("Connect to a peer to view a file tree.");
            }

            ui.separator();
            ui.add_space(5.0);
            // This is now safe because draw_file_tree only takes an immutable borrow
            // ui.label(format!("Status: {}", self.status_message));
        });

        ctx.request_repaint_after(std::time::Duration::from_millis(10));
    }
}

// --- Helper for Drawing the Recursive File Tree ---

impl P2PClientApp {
    fn draw_file_tree(&self, ui: &mut Ui, node: &FileNode, depth: usize) {
        let indent_space = 15.0 * depth as f32;
        let active_peer_id = self.active_peer_id.clone();

        let entry_hash = &node.hash_id;
        let filename = &node.name;
        let is_dir = node.is_dir;

        if is_dir {
            // --- Directory Node: Check if content (children) is already loaded (stored in the map) ---
            if let Some(children) = node.children.as_ref() {
                // Content is already loaded: draw the collapsing header and recurse
                CollapsingHeader::new(format!("📁 {}", filename))
                    .default_open(false)
                    .enabled(true)
                    .show(ui, |ui| {
                        // Recursive call: iterate over children and call draw_file_tree for each
                        for child_node in children {
                            self.draw_file_tree(ui, child_node, depth + 1);
                        }
                    });
            } else {
                // Content is NOT loaded: show a clickable button to request loading
                let response = ui.with_layout(Layout::left_to_right(Align::Min), |ui| {
                    ui.add_space(indent_space);
                    ui.add(Button::new(format!("▶️ {} (Load)", filename)).small()).on_hover_text(format!("Hash: {}...", &entry_hash[..8]));
                }).response;

                if response.clicked() {
                    if let Some(peer_id) = active_peer_id.clone() {
                        let _ = self.network_cmd_tx.send(NetworkCommand::RequestDirectoryContent(
                            peer_id,
                            entry_hash.clone(),
                        ));
                        // self.status_message = format!("Requested directory content for: {}...", &entry_hash[..8]);
                    }
                }
            }
        } else {
            // --- File Node (Chunk or Big) ---
            ui.with_layout(Layout::left_to_right(Align::Center), |ui| {
                ui.add_space(indent_space);
                if ui.selectable_label(false, format!("📄 {} (Hash: {}...)", filename, &entry_hash[..8])).on_hover_text("Click to request file chunks...").clicked() {
                    if let Some(peer_id) = active_peer_id.clone() {
                        let _ = self.network_cmd_tx.send(NetworkCommand::RequestChunk(peer_id, entry_hash.clone()));
                        // self.status_message = format!("Requested file chunks for: {}...", &entry_hash[..8]);
                    }
                }
            });
        }
    }
}
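A brief test sketch (illustrative only, not part of the diff above; it assumes a #[cfg(test)] module inside gui_app.rs where the items above are in scope):

    #[test]
    fn random_tree_converts_to_file_nodes() {
        // generate_random_tree(3) returns the root hash plus the hash -> node storage map.
        let (root_hash, storage) =
            client_network::MerkleNode::generate_random_tree(3).expect("tree generation failed");
        let root = convert_merkle_to_file_nodes(root_hash, &storage).expect("root should resolve");

        // The root is always a directory, and hash_id is the hex form of the root hash.
        assert!(root.is_dir);
        assert_eq!(root.hash_id, client_network::node_hash_to_hex_string(&root_hash));
    }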
36   client-gui/src/main.rs   (new file)
@@ -0,0 +1,36 @@
use client_network::{start_p2p_executor, NetworkCommand, NetworkEvent};
use crate::gui_app::P2PClientApp;

mod gui_app;

#[tokio::main]
async fn main() -> eframe::Result<()> {
    // 1. Setup Channels
    let (network_cmd_tx, network_cmd_rx) = crossbeam_channel::unbounded::<NetworkCommand>();
    let (network_event_tx, network_event_rx) = crossbeam_channel::unbounded::<NetworkEvent>();

    // 2. Start the P2P Network Executor in a separate Tokio task
    // The executor runs in the background of our main async runtime.
    let _network_handle = start_p2p_executor(network_cmd_rx, network_event_tx);

    // 3. Configure and Run the Eframe/Egui GUI
    let options = eframe::NativeOptions {
        viewport: egui::ViewportBuilder::default()
            .with_inner_size([1000.0, 700.0])
            .with_min_inner_size([700.0, 500.0])
            .with_icon(
                eframe::icon_data::from_png_bytes(include_bytes!("../assets/icon.png"))
                    .expect("Failed to load icon"),
            ),
        ..Default::default()
    };

    eframe::run_native(
        "Rust P2P Client (Merkle Tree Sync)",
        options,
        Box::new(|cc| {
            let app = P2PClientApp::new(network_cmd_tx, network_event_rx);
            Ok(Box::new(app))
        }),
    )
}
1   client-network/.gitignore   (vendored, new file)
@@ -0,0 +1 @@
/target
11   client-network/Cargo.toml   (new file)
@@ -0,0 +1,11 @@
[package]
name = "client-network"
version = "0.1.0"
edition = "2024"

[dependencies]
crossbeam-channel = "0.5.15"
tokio = { version = "1", features = ["full"] }
rand = "0.10.0-rc.5"
hex = "0.4.3"
sha2 = "0.10.9"
305   client-network/src/data.rs   (new file)
@@ -0,0 +1,305 @@
use std::collections::HashMap;
use std::hash::{DefaultHasher, Hash, Hasher};
use rand::{rng, Rng};

// --- Constants ---
const MAX_CHUNK_DATA_SIZE: usize = 1024;
const MAX_DIRECTORY_ENTRIES: usize = 16;
const MAX_BIG_CHILDREN: usize = 32;
const MIN_BIG_CHILDREN: usize = 2;
const FILENAME_HASH_SIZE: usize = 32;
const DIRECTORY_ENTRY_SIZE: usize = FILENAME_HASH_SIZE * 2; // 64 bytes

fn dummy_hash(data: &[u8]) -> NodeHash {
    let mut hasher = DefaultHasher::new();
    data.hash(&mut hasher);
    let hash_u64 = hasher.finish();

    let mut hash_array = [0u8; FILENAME_HASH_SIZE];
    // Simple way to spread a 64-bit hash across 32 bytes for a unique-ish ID
    for i in 0..8 {
        hash_array[i] = (hash_u64 >> (i * 8)) as u8;
    }
    hash_array // The rest remains 0, satisfying the 32-byte requirement
}

fn generate_random_filename() -> [u8; FILENAME_HASH_SIZE] {
    let mut rng = rand::rng();
    let mut filename_bytes = [0; FILENAME_HASH_SIZE];

    // Generate a random length for the base name
    let name_len = rng.random_range(5..21);

    // Generate random alphanumeric characters
    for i in 0..name_len {
        let char_code = rng.random_range(97..123); // 'a' through 'z'
        if i < FILENAME_HASH_SIZE {
            filename_bytes[i] = char_code as u8;
        }
    }

    // Append a common extension
    let ext = if rng.random_bool(0.5) { ".txt" } else { ".dat" };
    let ext_bytes = ext.as_bytes();
    let start_index = name_len.min(FILENAME_HASH_SIZE - ext_bytes.len());
    if start_index < FILENAME_HASH_SIZE {
        filename_bytes[start_index..(start_index + ext_bytes.len())].copy_from_slice(ext_bytes);
    }

    filename_bytes
}

pub type NodeHash = [u8; FILENAME_HASH_SIZE];

pub fn node_hash_to_hex_string(hash: &NodeHash) -> String {
    hash.iter()
        .map(|b| format!("{:02x}", b))
        .collect()
}

#[repr(u8)]
#[derive(Debug, Clone)]
pub enum MerkleNode {
    // up to 1024 bytes of raw data.
    Chunk(ChunkNode) = 0,
    // 0 to 16 directory entries.
    Directory(DirectoryNode) = 1,
    // list of 2 to 32 hashes pointing to Chunk or Big nodes.
    Big(BigNode) = 3,
    // list of 2 to 32 hashes pointing to Directory or BigDirectory nodes.
    BigDirectory(BigDirectoryNode) = 4,
}

fn generate_random_file_node(storage: &mut HashMap<NodeHash, MerkleNode>) -> Result<NodeHash, String> {
    let mut rng = rng();
    let is_big = rng.random_bool(0.2); // 20% chance of being a big file

    if !is_big {
        // Generate a simple Chunk Node
        let node = MerkleNode::Chunk(ChunkNode::new_random());
        let hash = dummy_hash(&node.serialize());
        storage.insert(hash, node);
        Ok(hash)
    } else {
        // Generate a Big Node (a file composed of chunks)
        let num_children = rng.random_range(MIN_BIG_CHILDREN..=MAX_BIG_CHILDREN.min(8)); // Limit complexity
        let mut children_hashes = Vec::with_capacity(num_children);

        for _ in 0..num_children {
            // Children must be Chunk or Big; for simplicity, we only generate Chunk children here.
            let chunk_node = MerkleNode::Chunk(ChunkNode::new_random());
            let chunk_hash = dummy_hash(&chunk_node.serialize());
            storage.insert(chunk_hash, chunk_node);
            children_hashes.push(chunk_hash);
        }

        let node = MerkleNode::Big(BigNode::new(children_hashes)?);
        let hash = dummy_hash(&node.serialize());
        storage.insert(hash, node);
        Ok(hash)
    }
}

fn generate_random_directory_node(
    depth: u32,
    max_depth: u32,
    storage: &mut HashMap<NodeHash, MerkleNode>
) -> Result<NodeHash, String> {
    let mut rng = rng();
    let current_depth = depth + 1;
    let is_big_dir = rng.random_bool(0.3) && current_depth < max_depth;

    if !is_big_dir || current_depth >= max_depth {
        // Generate a simple Directory Node (leaf level directory)
        let num_entries = rng.random_range(1..=MAX_DIRECTORY_ENTRIES.min(5)); // Limit directory size for testing
        let mut entries = Vec::with_capacity(num_entries);

        for _ in 0..num_entries {
            if rng.random_bool(0.7) {
                // 70% chance of creating a file (Chunk/Big)
                let file_hash = generate_random_file_node(storage)?;
                let entry = DirectoryEntry {
                    filename: generate_random_filename(),
                    content_hash: file_hash,
                };
                entries.push(entry);
            } else if current_depth < max_depth {
                // 30% chance of creating a subdirectory
                let dir_hash = generate_random_directory_node(current_depth, max_depth, storage)?;

                // Create a basic directory entry name
                let mut filename_bytes = [0; 32];
                let subdir_name = format!("dir_{}", current_depth);
                filename_bytes[..subdir_name.len()].copy_from_slice(subdir_name.as_bytes());

                let entry = DirectoryEntry {
                    filename: filename_bytes,
                    content_hash: dir_hash,
                };
                entries.push(entry);
            }
        }

        let node = MerkleNode::Directory(DirectoryNode::new(entries)?);
        let hash = dummy_hash(&node.serialize());
        storage.insert(hash, node);
        Ok(hash)
    } else {
        // Generate a BigDirectory Node (internal directory structure)
        let num_children = rng.random_range(MIN_BIG_CHILDREN..=MAX_BIG_CHILDREN.min(4)); // Limit children count
        let mut children = Vec::with_capacity(num_children);

        for _ in 0..num_children {
            // Children must be Directory or BigDirectory
            let child_hash = generate_random_directory_node(current_depth, max_depth, storage)?;
            children.push(child_hash);
        }

        let node = MerkleNode::BigDirectory(BigDirectoryNode::new(children)?);
        let hash = dummy_hash(&node.serialize());
        storage.insert(hash, node);
        Ok(hash)
    }
}

#[derive(Debug, Clone)]
pub struct ChunkNode {
    pub data: Vec<u8>,
}

impl ChunkNode {
    pub fn new(data: Vec<u8>) -> Result<Self, String> {
        if data.len() > MAX_CHUNK_DATA_SIZE {
            return Err(format!("Chunk data exceeds {} bytes (got {})", MAX_CHUNK_DATA_SIZE, data.len()));
        }
        Ok(ChunkNode { data })
    }

    pub fn new_random() -> Self {
        let mut rng = rand::rng();

        // Determine a random length between 1 and MAX_CHUNK_DATA_SIZE (inclusive).
        // The inclusive upper bound keeps lengths in 1..=1024.
        let random_len = rng.random_range(1..=MAX_CHUNK_DATA_SIZE);

        // Initialize a vector with the random length
        let mut data = vec![0u8; random_len];

        // Fill the vector with random bytes
        rng.fill(&mut data[..]);

        // Since we generated the length based on MAX_CHUNK_DATA_SIZE,
        // this is guaranteed to be valid and doesn't need to return a Result.
        ChunkNode { data }
    }
}

// Helper struct
#[derive(Debug, Clone)]
pub struct DirectoryEntry {
    pub filename: [u8; FILENAME_HASH_SIZE],
    pub content_hash: NodeHash,
}

#[derive(Debug, Clone)]
pub struct DirectoryNode {
    pub entries: Vec<DirectoryEntry>,
}

impl DirectoryNode {
    pub fn new(entries: Vec<DirectoryEntry>) -> Result<Self, String> {
        if entries.len() > MAX_DIRECTORY_ENTRIES {
            return Err(format!("Directory exceeds {} entries (got {})", MAX_DIRECTORY_ENTRIES, entries.len()));
        }
        Ok(DirectoryNode { entries })
    }
}

#[derive(Debug, Clone)]
pub struct BigNode {
    pub children_hashes: Vec<NodeHash>,
}

impl BigNode {
    pub fn new(children_hashes: Vec<NodeHash>) -> Result<Self, String> {
        let n = children_hashes.len();
        if n < MIN_BIG_CHILDREN || n > MAX_BIG_CHILDREN {
            return Err(format!(
                "Big node must have between {} and {} children, found {}",
                MIN_BIG_CHILDREN, MAX_BIG_CHILDREN, n
            ));
        }
        Ok(BigNode { children_hashes })
    }
}

#[derive(Debug, Clone)]
pub struct BigDirectoryNode {
    pub children_hashes: Vec<NodeHash>,
}

impl BigDirectoryNode {
    pub fn new(children_hashes: Vec<NodeHash>) -> Result<Self, String> {
        let n = children_hashes.len();
        if n < MIN_BIG_CHILDREN || n > MAX_BIG_CHILDREN {
            return Err(format!(
                "BigDirectory node must have between {} and {} children, found {}",
                MIN_BIG_CHILDREN, MAX_BIG_CHILDREN, n
            ));
        }
        Ok(BigDirectoryNode { children_hashes })
    }
}

impl MerkleNode {
    pub fn get_type_byte(&self) -> u8 {
        match self {
            MerkleNode::Chunk(_) => 0,
            MerkleNode::Directory(_) => 1,
            MerkleNode::Big(_) => 3,
            MerkleNode::BigDirectory(_) => 4,
        }
    }

    pub fn serialize(&self) -> Vec<u8> {
        let mut bytes = Vec::new();
        // 1. Add the type byte
        bytes.push(self.get_type_byte());

        // 2. Add the node-specific data
        match self {
            MerkleNode::Chunk(node) => {
                bytes.extend_from_slice(&node.data);
            }
            MerkleNode::Directory(node) => {
                // The data is the sequence of directory entries
                for entry in &node.entries {
                    bytes.extend_from_slice(&entry.filename);
                    bytes.extend_from_slice(&entry.content_hash);
                }
            }
            MerkleNode::Big(node) => {
                // The data is the list of child hashes
                for hash in &node.children_hashes {
                    bytes.extend_from_slice(hash);
                }
            }
            MerkleNode::BigDirectory(node) => {
                // The data is the list of child hashes
                for hash in &node.children_hashes {
                    bytes.extend_from_slice(hash);
                }
            }
        }
        bytes
    }

    pub fn generate_random_tree(max_depth: u32) -> Result<(NodeHash, HashMap<NodeHash, MerkleNode>), String> {
        let mut storage = HashMap::new();

        // Start tree generation from the root directory at depth 0
        let root_hash = generate_random_directory_node(0, max_depth, &mut storage)?;

        Ok((root_hash, storage))
    }
}
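Note that dummy_hash above fills only the first 8 of the 32 bytes from a DefaultHasher, so it is a placeholder rather than a real content hash. An illustrative sketch (not part of the diff, and the function name sha256_node_hash is made up) of a drop-in alternative using the sha2 crate already declared in client-network/Cargo.toml:

    use sha2::{Digest, Sha256};

    // Full 32-byte SHA-256 digest; SHA-256 output length matches FILENAME_HASH_SIZE.
    fn sha256_node_hash(data: &[u8]) -> NodeHash {
        Sha256::digest(data).into()
    }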
85   client-network/src/lib.rs   (new file)
@@ -0,0 +1,85 @@
mod data;

/// Messages sent to the Network thread by the GUI.
pub enum NetworkCommand {
    ConnectPeer(String),     // e.g., IP:PORT
    RequestFileTree(String), // e.g., peer_id
    RequestDirectoryContent(String, String),
    RequestChunk(String, String),
    // ...
}

/// Messages sent to the GUI by the Network thread.
pub enum NetworkEvent {
    PeerConnected(String),
    PeerListUpdated(Vec<String>),
    FileTreeReceived(String, Vec<MerkleNode>), // peer_id, content
    DataReceived(String, MerkleNode),
    FileTreeRootReceived(String, String),

    // ...
}

use crossbeam_channel::{Receiver, Sender};
pub use crate::data::*;
use sha2::{Digest, Sha256};

pub fn calculate_chunk_id(data: &[u8]) -> String {
    // 1. Create a new Sha256 hasher instance
    let mut hasher = Sha256::new();

    // 2. Write the input data into the hasher
    hasher.update(data);

    // 3. Finalize the hash computation and get the resulting bytes
    let hash_bytes = hasher.finalize();

    // 4. Convert the hash bytes (array of u8) into a hexadecimal string.
    // This is the common, human-readable format for cryptographic IDs.
    hex::encode(hash_bytes)
}

pub fn start_p2p_executor(
    cmd_rx: Receiver<NetworkCommand>,
    event_tx: Sender<NetworkEvent>,
) -> tokio::task::JoinHandle<()> {
    // Use tokio to spawn the asynchronous networking logic
    tokio::task::spawn(async move {
        // P2P/Networking Setup goes here
        println!("Network executor started.");

        // Main network loop
        loop {
            // Check for commands from the GUI
            if let Ok(cmd) = cmd_rx.try_recv() {
                match cmd {
                    NetworkCommand::ConnectPeer(addr) => {
                        println!("Attempting to connect to: {}", addr);
                        // Network logic to connect...
                        // If successful, send an event back:
                        // event_tx.send(NetworkEvent::PeerConnected(addr)).unwrap();
                    },
                    NetworkCommand::RequestFileTree(_) => todo!(),
                    // ... handle other commands
                    NetworkCommand::RequestDirectoryContent(_, _) => todo!(),
                    NetworkCommand::RequestChunk(_, _) => todo!(),
                }
            }

            // 2. Poll network for new events (e.g., an incoming connection)
            // ...
            // When a new peer is found:
            // event_tx.send(NetworkEvent::PeerConnected("NewPeerID".to_string())).unwrap();

            // Avoid spinning too fast
            tokio::time::sleep(std::time::Duration::from_millis(50)).await;
        }
    })
}
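An illustrative usage sketch for calculate_chunk_id (not part of the diff; the input bytes are arbitrary):

    fn main() {
        // SHA-256 produces 32 bytes, so the hex-encoded ID is 64 characters long.
        let id = client_network::calculate_chunk_id(b"hello world");
        assert_eq!(id.len(), 64);
        println!("chunk id = {id}");
    }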
@@ -1,69 +0,0 @@
use fltk::{
    app, prelude::*, window::Window, frame::Frame, group::{Pack, Flex},
    menu::MenuBar, button::Button, tree::Tree, enums::{FrameType, Align}
};

pub struct P2PClientGUI {
    app: app::App,
    win: Window,
}

impl P2PClientGUI {
    pub fn new() -> Self {
        let app = app::App::default();
        let mut win = Window::default()
            .with_size(900, 600)
            .with_label("P2P Merkle Client");

        // 1. Main Layout: Flex (Vertical)
        let mut main_flex = Flex::default_fill().column();

        // 2. Menu Bar (Top)
        let mut menu_bar = MenuBar::default().with_size(win.width(), 30);
        menu_bar.add_emit("File/Connect", fltk::enums::Shortcut::None, fltk::menu::MenuFlag::Invisible, app::channel().0, ());
        menu_bar.add_emit("File/Exit", fltk::enums::Shortcut::None, fltk::menu::MenuFlag::Invisible, app::channel().0, ());
        menu_bar.end();

        // 3. Content Area: Flex (Horizontal)
        let mut content_flex = Flex::default().row();

        // --- Central Area (Filesystem Tree)
        let mut fs_tree = Tree::default().with_label("Filesystem View");
        fs_tree.set_frame(FrameType::FlatBox);
        // Placeholder data for the tree
        fs_tree.add("root/Folder A/File 1");
        fs_tree.add("root/Folder B/File 2");

        content_flex.fixed(&fs_tree, win.width() - 200); // 700px width

        // --- Right-Sided Panel (Known Peers List)
        let mut right_panel = Pack::default();
        right_panel.set_frame(FrameType::EngravedBox);
        right_panel.set_spacing(5);

        Frame::default().with_label("Known Peers").set_align(Align::Top);
        // List of Labels (placeholder)
        Frame::default().with_label("Peer A (Online)").set_frame(FrameType::ThinUpBox);
        Frame::default().with_label("Peer B (Offline)").set_frame(FrameType::ThinUpBox);

        right_panel.end();
        content_flex.fixed(&right_panel, 200); // 200px width

        content_flex.end();

        main_flex.end(); // End of main layout

        win.end();
        win.show();

        Self { app, win }
    }

    pub fn run(&mut self) {
        // Here you would connect signals from the GUI to the network thread.
        // For now, it just runs the application loop.
        while self.app.wait() {
            // Handle GUI events
        }
    }
}
17   src/main.rs
@@ -1,17 +0,0 @@
// Bring the modules into scope
mod gui;
mod network;

#[tokio::main]
async fn main() {
    // tokio::spawn(async move {
    //     println!("Network Listener started...");
    //     network::init_network().await;
    // });

    let mut client_gui = gui::P2PClientGUI::new();
    println!("GUI running...");
    client_gui.run();

    println!("Application shut down.");
}
@@ -1,2 +0,0 @@
mod protocol;
mod peers;