merkle generation + render
Binary file not shown. (Image size before: 8.4 KiB, after: 1.0 KiB.)
@@ -1,73 +1,8 @@
-use client_network::{node_hash_to_hex_string, MerkleNode, NetworkCommand, NetworkEvent, NodeHash};
+use client_network::{filename_to_string, node_hash_to_hex_string, MerkleNode, MerkleTree, NetworkCommand, NetworkEvent, NodeHash};
 use crossbeam_channel::{Receiver, Sender};
 use egui::{Align, Button, CentralPanel, CollapsingHeader, Context, Layout, ScrollArea, SidePanel, TopBottomPanel, Ui, ViewportCommand};
 use std::collections::HashMap;
 
-pub struct FileNode {
-    pub name: String,
-    pub is_dir: bool,
-    pub hash_id: String, // The Merkle root or leaf hash (String)
-    pub children: Option<Vec<FileNode>>,
-}
-
-fn build_file_node_recursively(
-    hash: &NodeHash,
-    storage: &HashMap<NodeHash, MerkleNode>,
-    name: String,
-) -> Option<FileNode> {
-    let node = storage.get(hash)?;
-    let hash_id = hex::encode(hash);
-
-    match node {
-        MerkleNode::Directory(dir_node) => {
-            // Recurse through all entries to build children
-            let children: Vec<FileNode> = dir_node.entries.iter().filter_map(|entry| {
-                let filename_lossy = String::from_utf8_lossy(&entry.filename)
-                    .trim_end_matches('\0')
-                    .to_string();
-
-                build_file_node_recursively(&entry.content_hash, storage, filename_lossy)
-            }).collect();
-
-            Some(FileNode {
-                name,
-                is_dir: true,
-                hash_id,
-                children: Some(children),
-            })
-        }
-        MerkleNode::BigDirectory(big_dir_node) => {
-            // In a real system, BigDirectory children would have names stored in an index.
-            // Here, we generate dummy names to show recursion working.
-            let children: Vec<FileNode> = big_dir_node.children_hashes.iter().filter_map(|child_hash| {
-                let dummy_name = format!("chunk_group_{}", &hex::encode(child_hash)[..4]);
-                build_file_node_recursively(child_hash, storage, dummy_name)
-            }).collect();
-
-            Some(FileNode {
-                name,
-                is_dir: true,
-                hash_id,
-                children: Some(children),
-            })
-        }
-        // Chunk or Big nodes are files (leaves in the file tree)
-        _ => Some(FileNode {
-            name,
-            is_dir: false,
-            hash_id,
-            children: None,
-        }),
-    }
-}
-
-pub fn convert_merkle_to_file_nodes(root_hash: NodeHash, storage: &HashMap<NodeHash, MerkleNode>) -> Option<FileNode> {
-    let root_name = "/".to_string();
-    build_file_node_recursively(&root_hash, storage, root_name)
-}
-
 // --- Main Application Struct ---
 pub struct P2PClientApp {
     // Communication channels
@@ -79,24 +14,23 @@ pub struct P2PClientApp {
     known_peers: Vec<String>,
     connect_address_input: String,
 
-    peer_root_hash: HashMap<String, String>, // peer_id -> root_hash
-
     // Key: Parent Directory Hash (String), Value: List of children FileNode
-    loaded_tree_nodes: HashMap<String, FileNode>,
+    loaded_fs: HashMap<String, MerkleTree>,
 
-    // Which peer's tree we are currently displaying
-    active_peer_id: Option<String>,
-    active_root_hash: Option<String>,
+    // Current peer tree displayed
+    active_peer: Option<String>,
 }
 
 impl P2PClientApp {
     pub fn new(cmd_tx: Sender<NetworkCommand>, event_rx: Receiver<NetworkEvent>) -> Self {
-        let (root_hash, tree) = MerkleNode::generate_random_tree(5).expect("Couldn't generate tree");
+        let (root_hash, tree_content) = MerkleNode::generate_random_tree(5).expect("Couldn't generate tree");
         let mut peer_root_hash = HashMap::new();
         peer_root_hash.insert("bob".to_string(), "yoyoyoyo".to_string());
 
-        let mut loaded_tree_nodes = HashMap::new();
-        loaded_tree_nodes.insert(node_hash_to_hex_string(&root_hash), convert_merkle_to_file_nodes(root_hash, &tree).expect("Couldn't convert tree"));
+        let mut loaded_fs = HashMap::new();
+        let tree = MerkleTree::new(tree_content, root_hash);
+        loaded_fs.insert("bob".to_string(), tree);
 
         Self {
             network_cmd_tx: cmd_tx,
@@ -106,10 +40,8 @@ impl P2PClientApp {
                 "bob".to_string()
             ],
             connect_address_input: "127.0.0.1:8080".to_string(),
-            peer_root_hash,
-            loaded_tree_nodes,
-            active_peer_id: None,
-            active_root_hash: None,
+            loaded_fs,
+            active_peer: None,
         }
     }
 }
@@ -124,30 +56,38 @@ impl eframe::App for P2PClientApp {
         while let Ok(event) = self.network_event_rx.try_recv() {
             match event {
                 NetworkEvent::PeerConnected(addr) => {
+                    todo!();
+
                     self.status_message = format!("✅ Peer connected: {}", addr);
                     if !self.known_peers.contains(&addr) {
                         self.known_peers.push(addr);
                     }
                 }
                 NetworkEvent::PeerListUpdated(peers) => {
+                    todo!();
+
                     self.known_peers = peers;
                 }
                 NetworkEvent::FileTreeReceived(_peer_id, _) => {
+                    todo!();
+
                     // self.loaded_tree_nodes.insert(_peer_id, tree);
                     self.status_message = "🔄 File tree updated successfully.".to_string();
                 }
                 NetworkEvent::FileTreeRootReceived(peer_id, root_hash) => {
-                    self.status_message = format!("🔄 Received Merkle Root from {}: {}", peer_id, &root_hash[..8]);
-                    self.peer_root_hash.insert(peer_id.clone(), root_hash.clone());
-
-                    self.active_peer_id = Some(peer_id.clone());
-                    self.active_root_hash = Some(root_hash.clone());
-
-                    // Request the content of the root directory immediately
-                    let _ = self.network_cmd_tx.send(NetworkCommand::RequestDirectoryContent(
-                        peer_id,
-                        root_hash,
-                    ));
+                    todo!();
+
+                    // self.status_message = format!("🔄 Received Merkle Root from {}: {}", peer_id, &root_hash[..8]);
+                    //
+                    //
+                    // self.active_peer_id = Some(peer_id.clone());
+                    //
+                    //
+                    // // Request the content of the root directory immediately
+                    // let _ = self.network_cmd_tx.send(NetworkCommand::RequestDirectoryContent(
+                    //     peer_id,
+                    //     root_hash,
+                    // ));
                 }
                 // Handle other events like Disconnect, Error, etc.
                 _ => {}
@@ -188,24 +128,18 @@ impl eframe::App for P2PClientApp {
                 ui.label("No active peers.");
             } else {
                 for peer in &self.known_peers {
-                    let is_active = self.active_peer_id.as_ref().map_or(false, |id| id == peer);
-                    let root_hash_str = self.peer_root_hash.get(peer)
-                        .map(|h| format!("Root: {}", &h[..8]))
-                        .unwrap_or_else(|| "Root: N/A".to_string());
-
-                    if ui.selectable_label(is_active, format!("{} ({})", peer, root_hash_str)).clicked() {
-                        // Switch to displaying this peer's tree
-                        self.active_peer_id = Some(peer.clone());
-                        if let Some(hash) = self.peer_root_hash.get(peer) {
-                            self.active_root_hash = Some(hash.clone());
+                    let is_active = self.active_peer.as_ref().map_or(false, |id| id == peer); // if peer.id == self.active_peer_id
 
+                    if ui.selectable_label(is_active, format!("{}", peer)).clicked() {
+                        // switch to displaying this peer's tree
+                        self.active_peer = Some(peer.clone());
                         // Request root content if not loaded
-                        if !self.loaded_tree_nodes.contains_key(hash) {
-                            let _ = self.network_cmd_tx.send(NetworkCommand::RequestDirectoryContent(
-                                peer.clone(),
-                                hash.clone(),
-                            ));
-                        }
+                        if !self.loaded_fs.contains_key(self.active_peer.as_ref().unwrap()) {
+                            todo!();
+                            // let _ = self.network_cmd_tx.send(NetworkCommand::RequestDirectoryContent(
+                            //     peer.clone(),
+                            //     peer.clone(),
+                            // ));
                         }
                     }
                 }
@@ -214,18 +148,25 @@ impl eframe::App for P2PClientApp {
         });
 
         // 4. Central Panel (Filesystem Tree)
+        let heading = {
+            if let Some(peer) = &self.active_peer {
+                format!("📂 {}'s tree", peer)
+            } else {
+                "📂 p2p-merkel client".to_string()
+            }
+        };
         CentralPanel::default().show(ctx, |ui| {
-            ui.heading("📂 Decentralized File System");
+            ui.heading(heading);
             ui.separator();
 
-            if let Some(root_hash) = &self.active_root_hash {
-                if let Some(root_nodes) = self.loaded_tree_nodes.get(root_hash) {
+            if let Some(active_peer) = &self.active_peer {
+                if let Some(tree) = self.loaded_fs.get(active_peer) {
                     ScrollArea::vertical().show(ui, |ui| {
                         // Start drawing the tree from the root hash
-                        self.draw_file_tree(ui, root_nodes, 0);
+                        self.draw_file_tree(ui, tree);
                     });
                 } else {
-                    ui.label(format!("Loading root content for hash: {}", &root_hash[..8]));
+                    ui.label(format!("Loading root for peer: {}", active_peer));
                 }
             } else {
                 ui.label("Connect to a peer to view a file tree.");
@@ -244,56 +185,125 @@ impl eframe::App for P2PClientApp {
 // --- Helper for Drawing the Recursive File Tree ---
 
 impl P2PClientApp {
-    fn draw_file_tree(&self, ui: &mut Ui, node: &FileNode, depth: usize) {
-        let indent_space = 15.0 * depth as f32;
-        let active_peer_id = self.active_peer_id.clone();
-
-        let entry_hash = &node.hash_id;
-        let filename = &node.name;
-        let is_dir = node.is_dir;
-
-        if is_dir {
-            // --- Directory Node: Check if content (children) is already loaded (stored in the map) ---
-
-            if let Some(children) = node.children.as_ref() {
-                // Content is already loaded: draw the collapsing header and recurse
-                CollapsingHeader::new(format!("📁 {}", filename))
-                    .default_open(false)
-                    .enabled(true)
-                    .show(ui, |ui| {
-                        // Recursive call: iterate over children and call draw_file_tree for each
-                        for child_node in children {
-                            self.draw_file_tree(ui, child_node, depth + 1);
-                        }
-                    });
-            } else {
-                // Content is NOT loaded: show a clickable button to request loading
-                let response = ui.with_layout(Layout::left_to_right(Align::Min), |ui| {
-                    ui.add_space(indent_space);
-                    ui.add(Button::new(format!("▶️ {} (Load)", filename)).small()).on_hover_text(format!("Hash: {}...", &entry_hash[..8]));
-                }).response;
-
-                if response.clicked() {
-                    if let Some(peer_id) = active_peer_id.clone() {
-                        let _ = self.network_cmd_tx.send(NetworkCommand::RequestDirectoryContent(
-                            peer_id,
-                            entry_hash.clone(),
-                        ));
-                        // self.status_message = format!("Requested directory content for: {}...", &entry_hash[..8]);
-                    }
-                }
-            }
-        } else {
-            // --- File Node (Chunk or Big) ---
-            ui.with_layout(Layout::left_to_right(Align::Center), |ui| {
-                ui.add_space(indent_space);
-                if ui.selectable_label(false, format!("📄 {} (Hash: {}...)", filename, &entry_hash[..8])).on_hover_text("Click to request file chunks...").clicked() {
-                    if let Some(peer_id) = active_peer_id.clone() {
-                        let _ = self.network_cmd_tx.send(NetworkCommand::RequestChunk(peer_id, entry_hash.clone()));
-                        // self.status_message = format!("Requested file chunks for: {}...", &entry_hash[..8]);
-                    }
-                }
-            });
-        }
-    }
+    // fn draw_file_tree(&self, ui: &mut Ui, node: &FileNode, depth: usize) {
+    //     let indent_space = 15.0 * depth as f32;
+    //     let active_peer_id = self.active_peer_id.clone();
+    //
+    //     let entry_hash = &node.hash_id;
+    //     let filename = &node.name;
+    //     let is_dir = node.is_dir;
+    //
+    //     if is_dir {
+    //         // --- Directory Node: Check if content (children) is already loaded (stored in the map) ---
+    //
+    //         if let Some(children) = node.children.as_ref() {
+    //             // Content is already loaded: draw the collapsing header and recurse
+    //             CollapsingHeader::new(format!("📁 {}", filename))
+    //                 .default_open(false)
+    //                 .enabled(true)
+    //                 .show(ui, |ui| {
+    //                     // Recursive call: iterate over children and call draw_file_tree for each
+    //                     for child_node in children {
+    //                         self.draw_file_tree(ui, child_node, depth + 1);
+    //                     }
+    //                 });
+    //         } else {
+    //             // Content is NOT loaded: show a clickable button to request loading
+    //             let response = ui.with_layout(Layout::left_to_right(Align::Min), |ui| {
+    //                 ui.add_space(indent_space);
+    //                 ui.add(Button::new(format!("▶️ {} (Load)", filename)).small()).on_hover_text(format!("Hash: {}...", &entry_hash[..8]));
+    //             }).response;
+    //
+    //             if response.clicked() {
+    //                 if let Some(peer_id) = active_peer_id.clone() {
+    //                     let _ = self.network_cmd_tx.send(NetworkCommand::RequestDirectoryContent(
+    //                         peer_id,
+    //                         entry_hash.clone(),
+    //                     ));
+    //                     // self.status_message = format!("Requested directory content for: {}...", &entry_hash[..8]);
+    //                 }
+    //             }
+    //         }
+    //     } else {
+    //         // --- File Node (Chunk or Big) ---
+    //         ui.with_layout(Layout::left_to_right(Align::Center), |ui| {
+    //             ui.add_space(indent_space);
+    //             if ui.selectable_label(false, format!("📄 {} (Hash: {}...)", filename, &entry_hash[..8])).on_hover_text("Click to request file chunks...").clicked() {
+    //                 if let Some(peer_id) = active_peer_id.clone() {
+    //                     let _ = self.network_cmd_tx.send(NetworkCommand::RequestChunk(peer_id, entry_hash.clone()));
+    //                     // self.status_message = format!("Requested file chunks for: {}...", &entry_hash[..8]);
+    //                 }
+    //             }
+    //         });
+    //     }
+    // }
+
+    fn draw_file_tree(&self, ui: &mut Ui, tree: &MerkleTree) {
+        assert!(self.active_peer.is_some());
+        assert!(self.loaded_fs.get(&self.active_peer.clone().unwrap()).is_some());
+        let root = tree.root;
+        CollapsingHeader::new(format!("📁 root"))
+            .default_open(true)
+            .enabled(true)
+            .show(ui, |ui| {
+                self.draw_file_node(ui, root, tree, 0, None);
+            });
+    }
+
+    fn draw_file_node(&self, ui: &mut Ui, to_draw: NodeHash, tree: &MerkleTree, depth: usize, filename: Option<[u8; 32]>) {
+        if depth >= 32 {
+            return;
+        }
+        if let Some(current) = tree.data.get(&to_draw) {
+            let name = {
+                if filename.is_some() {
+                    filename_to_string(filename.unwrap())
+                } else {
+                    node_hash_to_hex_string(&to_draw)
+                }
+            };
+            match current {
+                MerkleNode::Chunk(node) => {
+                    if ui.selectable_label(false, format!("📄 (C) {}...", name)).on_hover_text("Click to request file chunks...").clicked() {
+                        todo!();
+                        // if let Some(peer_id) = active_peer_id.clone() {
+                        //     let _ = self.network_cmd_tx.send(NetworkCommand::RequestChunk(peer_id, entry_hash.clone()));
+                        //     // self.status_message = format!("Requested file chunks for: {}...", &entry_hash[..8]);
+                        // }
+                    }
+                }
+                MerkleNode::Directory(node) => {
+                    CollapsingHeader::new(format!("📁 (D) {}", name))
+                        .default_open(false)
+                        .enabled(true)
+                        .show(ui, |ui| {
+                            for entry in &node.entries {
+                                self.draw_file_node(ui, entry.content_hash, tree, depth + 1, Some(entry.filename));
+                            }
+                        });
+                }
+                MerkleNode::Big(node) => {
+                    CollapsingHeader::new(format!("📄 (B) {}", name))
+                        .default_open(false)
+                        .enabled(true)
+                        .show(ui, |ui| {
+                            for child in &node.children_hashes {
+                                self.draw_file_node(ui, child.clone(), tree, depth + 1, None);
+                            }
+                        });
+                }
+                MerkleNode::BigDirectory(node) => {
+                    CollapsingHeader::new(format!("📁 (BD) {}", name))
+                        .default_open(false)
+                        .enabled(true)
+                        .show(ui, |ui| {
+                            for child in &node.children_hashes {
+                                self.draw_file_node(ui, child.clone(), tree, depth + 1, None);
+                            }
+                        });
+                }
+            }
+        }
+    }
 }
@@ -26,7 +26,7 @@ async fn main() -> eframe::Result<()> {
     };
 
     eframe::run_native(
-        "Rust P2P Client (Merkle Tree Sync)",
+        "p2p-merkle client",
        options,
        Box::new(|cc| {
            let app = P2PClientApp::new(network_cmd_tx, network_event_rx);
@@ -1,6 +1,7 @@
 use std::collections::HashMap;
 use std::hash::{DefaultHasher, Hash, Hasher};
 use rand::{rng, Rng};
+use sha2::{Digest, Sha256};
 
 // --- Constants ---
 const MAX_CHUNK_DATA_SIZE: usize = 1024;
@@ -10,7 +11,7 @@ const MIN_BIG_CHILDREN: usize = 2;
 const FILENAME_HASH_SIZE: usize = 32;
 const DIRECTORY_ENTRY_SIZE: usize = FILENAME_HASH_SIZE * 2; // 64 bytes
 
-fn dummy_hash(data: &[u8]) -> NodeHash {
+fn hash(data: &[u8]) -> NodeHash {
     let mut hasher = DefaultHasher::new();
     data.hash(&mut hasher);
     let hash_u64 = hasher.finish();
@@ -70,6 +71,18 @@ pub enum MerkleNode {
     BigDirectory(BigDirectoryNode) = 4,
 }
 
+#[derive(Debug, Clone)]
+pub struct MerkleTree {
+    pub data: HashMap<NodeHash, MerkleNode>,
+    pub root: NodeHash,
+}
+
+impl MerkleTree {
+    pub fn new(data: HashMap<NodeHash, MerkleNode>, root: NodeHash) -> MerkleTree {
+        MerkleTree { data, root }
+    }
+}
+
 fn generate_random_file_node(storage: &mut HashMap<NodeHash, MerkleNode>) -> Result<NodeHash, String> {
     let mut rng = rng();
     let is_big = rng.random_bool(0.2); // 20% chance of being a big file
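For reference, the MerkleTree type introduced above just pairs the flat node store (NodeHash -> MerkleNode) with its root hash; this is what the GUI now caches per peer in loaded_fs. A minimal usage sketch, illustration only and not part of the commit, assuming the client_network crate root exports MerkleNode, MerkleTree, NodeHash and node_hash_to_hex_string as the GUI's use statement in this commit suggests:

    use std::collections::HashMap;
    use client_network::{node_hash_to_hex_string, MerkleNode, MerkleTree, NodeHash};

    fn build_demo_tree() -> MerkleTree {
        // generate_random_tree(5) yields the root hash plus the hash -> node map,
        // mirroring how P2PClientApp::new seeds loaded_fs for the "bob" peer.
        let (root_hash, nodes): (NodeHash, HashMap<NodeHash, MerkleNode>) =
            MerkleNode::generate_random_tree(5).expect("Couldn't generate tree");
        let tree = MerkleTree::new(nodes, root_hash);
        println!("root = {}", node_hash_to_hex_string(&tree.root));
        tree
    }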
@@ -77,7 +90,7 @@ fn generate_random_file_node(storage: &mut HashMap<NodeHash, MerkleNode>) -> Res
     if !is_big {
         // Generate a simple Chunk Node
         let node = MerkleNode::Chunk(ChunkNode::new_random());
-        let hash = dummy_hash(&node.serialize());
+        let hash = hash(&node.serialize());
         storage.insert(hash, node);
         Ok(hash)
     } else {
@@ -88,13 +101,13 @@ fn generate_random_file_node(storage: &mut HashMap<NodeHash, MerkleNode>) -> Res
         for _ in 0..num_children {
             // Children must be Chunk or Big; for simplicity, we only generate Chunk children here.
             let chunk_node = MerkleNode::Chunk(ChunkNode::new_random());
-            let chunk_hash = dummy_hash(&chunk_node.serialize());
+            let chunk_hash = hash(&chunk_node.serialize());
             storage.insert(chunk_hash, chunk_node);
             children_hashes.push(chunk_hash);
         }
 
         let node = MerkleNode::Big(BigNode::new(children_hashes)?);
-        let hash = dummy_hash(&node.serialize());
+        let hash = hash(&node.serialize());
         storage.insert(hash, node);
         Ok(hash)
     }
@@ -141,7 +154,7 @@ fn generate_random_directory_node(
     }
 
     let node = MerkleNode::Directory(DirectoryNode::new(entries)?);
-    let hash = dummy_hash(&node.serialize());
+    let hash = hash(&node.serialize());
     storage.insert(hash, node);
     Ok(hash)
 
@@ -157,7 +170,7 @@ fn generate_random_directory_node(
     }
 
     let node = MerkleNode::BigDirectory(BigDirectoryNode::new(children)?);
-    let hash = dummy_hash(&node.serialize());
+    let hash = hash(&node.serialize());
     storage.insert(hash, node);
     Ok(hash)
 }
@@ -201,6 +214,11 @@ pub struct DirectoryEntry {
     pub content_hash: NodeHash,
 }
 
+pub fn filename_to_string(filename: [u8; FILENAME_HASH_SIZE]) -> String {
+    let end_index = filename.iter().position(|&b| b == 0).unwrap_or(FILENAME_HASH_SIZE);
+    String::from_utf8_lossy(&filename[..end_index]).to_string()
+}
+
 #[derive(Debug, Clone)]
 pub struct DirectoryNode {
     pub entries: Vec<DirectoryEntry>,
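The filename_to_string helper added above treats DirectoryEntry::filename as a fixed, NUL-padded buffer of FILENAME_HASH_SIZE (32) bytes and decodes everything before the first zero byte as lossy UTF-8. A small illustrative sketch, assuming the helper is reachable from the client_network crate root as the GUI's import in this commit suggests; the buffer contents here are made up:

    fn filename_decode_example() {
        // Hypothetical 32-byte, NUL-padded filename as stored in a DirectoryEntry.
        let mut raw = [0u8; 32];
        raw[..9].copy_from_slice(b"notes.txt");
        // Bytes after the first NUL are ignored; invalid UTF-8 would be replaced lossily.
        assert_eq!(client_network::filename_to_string(raw), "notes.txt");
    }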
@@ -61,15 +61,23 @@ pub fn start_p2p_executor(
         if let Ok(cmd) = cmd_rx.try_recv() {
             match cmd {
                 NetworkCommand::ConnectPeer(addr) => {
-                    println!("Attempting to connect to: {}", addr);
+                    println!("[Network] ConnectPeer() called");
+                    println!("[Network] Attempting to connect to: {}", addr);
                     // Network logic to connect...
                     // If successful, send an event back:
                     // event_tx.send(NetworkEvent::PeerConnected(addr)).unwrap();
                 },
-                NetworkCommand::RequestFileTree(_) => todo!(),
+                NetworkCommand::RequestFileTree(_) => {
+                    println!("[Network] RequestFileTree() called");
+                },
+
                 // ... handle other commands
-                NetworkCommand::RequestDirectoryContent(_, _) => todo!(),
-                NetworkCommand::RequestChunk(_, _) => todo!(),
+                NetworkCommand::RequestDirectoryContent(_, _) => {
+                    println!("[Network] RequestDirectoryContent() called");
+                },
+                NetworkCommand::RequestChunk(_, _) => {
+                    println!("[Network] RequestChunk() called");
+                },
             }
         }