Compare commits

cb2e89b1e9...a3648c2116

12 commits:

- a3648c2116
- f8e3e46672
- 9ba752641b
- 5899a275a2
- da29d67472
- b465608797
- 732daf0578
- 65447912bf
- c928d98b56
- 31b26e96b0
- 26fa7a833f
- fb2c3310af
.gitignore (vendored): 1 line changed
@@ -1 +1,2 @@
/target
target/
BIN  Download/12.text (normal file, binary file not shown)
BIN  Download/13.text (normal file, binary file not shown)
BIN  Download/README (normal file, binary file not shown)
@@ -1,14 +1,17 @@
use client_network::{
    ChunkNode, MerkleNode, MerkleTree, NetworkCommand, NetworkEvent, NodeHash, filename_to_string,
    node_hash_to_hex_string,
    ChunkNode, MerkleNode, MerkleTree, NetworkCommand, NetworkEvent, NodeHash,
    big_or_chunk_to_file, filename_to_string, generate_base_tree, node_hash_to_hex_string,
    remove_null_bytes,
};
use crossbeam_channel::{Receiver, Sender};
use egui::{
    Align, Align2, Button, CentralPanel, CollapsingHeader, Color32, Context, CornerRadius, Frame,
    Id, LayerId, Layout, Order, Popup, Response, ScrollArea, SidePanel, Stroke, TextStyle,
    TopBottomPanel, Ui, ViewportCommand,
    CentralPanel, CollapsingHeader, Color32, Context, CornerRadius, Frame, Response, ScrollArea,
    SidePanel, Stroke, TopBottomPanel, Ui, ViewportCommand,
};
use std::{collections::HashMap, fmt::format};
use std::collections::HashSet;
use std::{collections::HashMap, fmt::format, io::Seek};

use std::fs::{File, OpenOptions, create_dir};

enum ServerStatus {
    Loading,
@@ -35,6 +38,7 @@ pub struct P2PClientApp {

    // Key: Parent Directory Hash (String), Value: List of children FileNode
    loaded_fs: HashMap<String, MerkleTree>,
    shared_tree: MerkleTree,

    // Current peer tree displayed
    active_peer: Option<String>,
@@ -45,8 +49,11 @@ pub struct P2PClientApp {

    error_message: Option<String>, // Some(message) -> display it, None -> nothing
    success_message: Option<String>, // Some(message) -> display it, None -> nothing
    //
    active_server: String,

    current_downloading_file_map: MerkleTree,
    remaining_chunks: HashSet<[u8; 32]>,
    root_downloading_file: String,
}

impl P2PClientApp {
@@ -54,6 +61,7 @@ impl P2PClientApp {
        //let (root_hash, tree_content) = MerkleNode::generate_base_tree();

        let mut loaded_fs = HashMap::new();
        let mut current_downloading_file_map = MerkleTree::new(HashMap::new(), [0; 32]);
        //let tree = MerkleTree::new(tree_content, root_hash);
        //loaded_fs.insert("bob".to_string(), tree);

@@ -75,6 +83,10 @@ impl P2PClientApp {
            success_message: None,
            connect_name_input: "bob".to_string(),
            active_server: "".to_string(),
            shared_tree: generate_base_tree(),
            current_downloading_file_map: current_downloading_file_map,
            root_downloading_file: "".to_string(),
            remaining_chunks: HashSet::new(),
        }
    }
pub fn show_error(&mut self, msg: impl Into<String>) {
|
||||
@@ -124,13 +136,70 @@ impl eframe::App for P2PClientApp {
|
||||
self.known_peers.push((addr, true));
|
||||
}
|
||||
}
|
||||
NetworkEvent::RootRequest(addr) => {
|
||||
let root = self.shared_tree.root;
|
||||
let _ = self
|
||||
.network_cmd_tx
|
||||
.send(NetworkCommand::SendRootReply(root.to_vec(), addr));
|
||||
}
|
||||
NetworkEvent::DatumRequest(node_hash, addr) => {
|
||||
let hash: NodeHash = node_hash.try_into().expect("incorrect size");
|
||||
let asked_datum = self.shared_tree.data.get(&hash);
|
||||
match asked_datum {
|
||||
Some(datum_found) => {
|
||||
let _ = self.network_cmd_tx.send(NetworkCommand::SendDatum(
|
||||
datum_found.clone(),
|
||||
node_hash,
|
||||
addr,
|
||||
));
|
||||
}
|
||||
None => {
|
||||
let _ = self
|
||||
.network_cmd_tx
|
||||
.send(NetworkCommand::SendNoDatum(node_hash.to_vec(), addr));
|
||||
}
|
||||
}
|
||||
}
|
||||
NetworkEvent::PeerListUpdated(peers) => {
|
||||
//todo!();
|
||||
|
||||
self.known_peers = peers;
|
||||
}
|
||||
NetworkEvent::FileTreeReceived(node_hash, merklenode) => {
|
||||
//self.status_message = "🔄 File tree updated successfully.".to_string();
|
||||
|
||||
NetworkEvent::FileTreeReceived(node_hash, merklenode, ip) => {
|
||||
match &self.active_peer {
|
||||
Some(active_peer) => {
|
||||
if let Some(maptree) = self.loaded_fs.get_mut(active_peer) {
|
||||
maptree.data.insert(node_hash, merklenode.clone());
|
||||
match merklenode {
|
||||
MerkleNode::Directory(d) => {
|
||||
for entry in d.entries {
|
||||
let _ = self.network_cmd_tx.send(
|
||||
NetworkCommand::GetChildren(
|
||||
entry.content_hash,
|
||||
ip.clone(),
|
||||
false,
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
MerkleNode::BigDirectory(bigd) => {
|
||||
for entry in bigd.children_hashes {
|
||||
let _ = self.network_cmd_tx.send(
|
||||
NetworkCommand::GetChildren(
|
||||
entry,
|
||||
ip.clone(),
|
||||
false,
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
None => {}
|
||||
}
|
||||
}
|
||||
NetworkEvent::FileTreeRootReceived(peer_id, root_hash) => {
|
||||
// todo!();
|
||||
@@ -142,8 +211,10 @@ impl eframe::App for P2PClientApp {
|
||||
);*/
|
||||
|
||||
if let Ok(chunknode) = ChunkNode::new(Vec::new()) {
|
||||
let mut data_map: HashMap<NodeHash, MerkleNode> = HashMap::new();
|
||||
data_map.insert(root_hash, MerkleNode::Chunk(chunknode));
|
||||
let data_map: HashMap<NodeHash, MerkleNode> = HashMap::new();
|
||||
//data_map.insert(root_hash, MerkleNode::Chunk(chunknode));
|
||||
println!("len root: {}", data_map.len());
|
||||
println!("node hash: {:?}", root_hash.to_vec());
|
||||
let tree = MerkleTree {
|
||||
data: data_map,
|
||||
root: root_hash,
|
||||
@@ -184,10 +255,53 @@ impl eframe::App for P2PClientApp {
|
||||
NetworkEvent::Error(err) => {
|
||||
self.show_error(err);
|
||||
}
|
||||
NetworkEvent::InitDownload(hash, ip) => {
|
||||
if let Some(addr) = &self.active_peer {
|
||||
if let Some(roottree) = self.loaded_fs.get(addr) {
|
||||
if let Some(root) = roottree.data.get(&hash) {
|
||||
let _ = self
|
||||
.current_downloading_file_map
|
||||
.data
|
||||
.insert(hash, root.clone());
|
||||
let _ = self
|
||||
.network_cmd_tx
|
||||
.send(NetworkCommand::GetChildren(hash, ip, true));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
NetworkEvent::DataReceived(hash, merkle_node, ip) => {
|
||||
let _ = self
|
||||
.current_downloading_file_map
|
||||
.data
|
||||
.insert(hash, merkle_node.clone());
|
||||
|
||||
println!("merkle:{}", merkle_node.get_type_byte());
|
||||
match merkle_node {
|
||||
MerkleNode::Big(bigfile) => {
|
||||
for entry in bigfile.children_hashes {
|
||||
println!("entry: {:?}", entry);
|
||||
let _ = self.network_cmd_tx.send(NetworkCommand::GetChildren(
|
||||
entry,
|
||||
ip.clone(),
|
||||
true,
|
||||
));
|
||||
self.remaining_chunks.insert(entry);
|
||||
}
|
||||
self.remaining_chunks.remove(&hash);
|
||||
}
|
||||
MerkleNode::Chunk(chunk) => {
|
||||
self.remaining_chunks.remove(&hash);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
if self.remaining_chunks.is_empty() {
|
||||
println!("bigfile téléchargé");
|
||||
}
|
||||
}
|
||||
NetworkEvent::Success(msg) => {
|
||||
self.show_success(msg);
|
||||
}
|
||||
NetworkEvent::DataReceived(_, merkle_node) => todo!(),
|
||||
NetworkEvent::HandshakeFailed() => {}
|
||||
NetworkEvent::ServerHandshakeFailed(err) => {
|
||||
self.active_server = "".to_string();
|
||||
@@ -254,64 +368,7 @@ impl eframe::App for P2PClientApp {
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
||||
/* ui.horizontal(|ui| {
|
||||
ui.label("Server peer name:");
|
||||
ui.text_edit_singleline(&mut self.connect_server_name_input);
|
||||
if ui.button("Connect").clicked() {
|
||||
let addr = self.connect_address_input.clone();
|
||||
let serv_name = self.connect_server_name_input.clone();
|
||||
let _ = self
|
||||
.network_cmd_tx
|
||||
.send(NetworkCommand::ConnectToServer(addr, serv_name));
|
||||
self.server_status = ServerStatus::Loading;
|
||||
ui.close();
|
||||
}
|
||||
});*/
|
||||
});
|
||||
|
||||
// état
|
||||
|
||||
/*if ui.button("Network").clicked() {
|
||||
self.show_network_popup = true;
|
||||
}*/
|
||||
|
||||
/*if self.show_network_popup {
|
||||
egui::Window::new("Network")
|
||||
.collapsible(false)
|
||||
.resizable(false)
|
||||
.show(ctx, |ui| {
|
||||
ui.horizontal_wrapped(|ui| {
|
||||
ui.with_layout(
|
||||
egui::Layout::right_to_left(egui::Align::TOP),
|
||||
|ui| {
|
||||
if ui.button("✕").clicked() {
|
||||
self.show_network_popup = false;
|
||||
}
|
||||
},
|
||||
);
|
||||
});
|
||||
ui.horizontal(|ui| {
|
||||
ui.label("Server IP:");
|
||||
ui.text_edit_singleline(&mut self.connect_address_input);
|
||||
});
|
||||
ui.horizontal(|ui| {
|
||||
ui.label("Server peer name:");
|
||||
ui.text_edit_singleline(&mut self.connect_server_name_input);
|
||||
if ui.button("Connect").clicked() {
|
||||
// envoyer commande...
|
||||
let addr = self.connect_address_input.clone();
|
||||
let serv_name = self.connect_server_name_input.clone();
|
||||
let _ = self
|
||||
.network_cmd_tx
|
||||
.send(NetworkCommand::ConnectToServer(addr, serv_name));
|
||||
self.server_status = ServerStatus::Loading;
|
||||
|
||||
self.show_network_popup = false;
|
||||
}
|
||||
});
|
||||
});
|
||||
}*/
|
||||
});
|
||||
});
|
||||
|
||||
@@ -359,6 +416,11 @@ impl eframe::App for P2PClientApp {
|
||||
error.to_string()
|
||||
);
|
||||
}
|
||||
if let Some(active_peer) = &self.active_peer {
|
||||
if let Some(tree) = self.loaded_fs.get(active_peer) {
|
||||
println!("{}", tree.data.len());
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
@@ -385,7 +447,7 @@ impl eframe::App for P2PClientApp {
|
||||
let internal = frame.show(ui, |ui| {
|
||||
ui.selectable_label(is_active, format!("{}", peer.0))
|
||||
});
|
||||
selectable = internal.response;
|
||||
selectable = internal.inner;
|
||||
} else {
|
||||
selectable = ui.selectable_label(is_active, format!("{}", peer.0));
|
||||
}
|
||||
@@ -551,11 +613,12 @@ impl P2PClientApp {
|
||||
if let Some(current) = tree.data.get(&to_draw) {
|
||||
let name = {
|
||||
if filename.is_some() {
|
||||
filename_to_string(filename.unwrap())
|
||||
String::from_utf8(filename.unwrap().to_vec()).expect("err")
|
||||
} else {
|
||||
node_hash_to_hex_string(&to_draw)
|
||||
}
|
||||
};
|
||||
|
||||
match current {
|
||||
MerkleNode::Chunk(node) => {
|
||||
if ui
|
||||
@@ -563,11 +626,30 @@ impl P2PClientApp {
|
||||
.on_hover_text("Click to request file chunks...")
|
||||
.clicked()
|
||||
{
|
||||
todo!();
|
||||
match create_dir("./Download/") {
|
||||
Ok(_) => println!("Directory created successfully!"),
|
||||
Err(e) => println!("Failed to create directory: {}", e),
|
||||
}
|
||||
|
||||
let new_name = format!("./Download/{}", name);
|
||||
|
||||
let sani = remove_null_bytes(&new_name);
|
||||
|
||||
println!("sani:{}", sani);
|
||||
|
||||
let mut file = OpenOptions::new()
|
||||
.append(true)
|
||||
.create(true)
|
||||
.open(sani)
|
||||
.unwrap();
|
||||
|
||||
big_or_chunk_to_file(tree, &MerkleNode::Chunk(node.clone()), &mut file);
|
||||
|
||||
// if let Some(peer_id) = active_peer_id.clone() {
|
||||
// let _ = self.network_cmd_tx.send(NetworkCommand::RequestChunk(peer_id, entry_hash.clone()));
|
||||
// // self.status_message = format!("Requested file chunks for: {}...", &entry_hash[..8]);
|
||||
// }
|
||||
// todo!();
|
||||
}
|
||||
}
|
||||
MerkleNode::Directory(node) => {
|
||||
@@ -581,26 +663,23 @@ impl P2PClientApp {
|
||||
entry.content_hash,
|
||||
tree,
|
||||
depth + 1,
|
||||
Some(
|
||||
entry
|
||||
.filename
|
||||
.as_slice()
|
||||
.try_into()
|
||||
.expect("incorrect size"),
|
||||
),
|
||||
Some(entry.filename.try_into().expect("incorrect size")),
|
||||
);
|
||||
}
|
||||
});
|
||||
}
|
||||
MerkleNode::Big(node) => {
|
||||
CollapsingHeader::new(format!("📄 (B) {}", name))
|
||||
.default_open(false)
|
||||
.enabled(true)
|
||||
.show(ui, |ui| {
|
||||
for child in &node.children_hashes {
|
||||
self.draw_file_node(ui, child.clone(), tree, depth + 1, None);
|
||||
}
|
||||
});
|
||||
if ui
|
||||
.selectable_label(false, format!("📄 (B) {}", name))
|
||||
.on_hover_text("Click to request file chunks...")
|
||||
.clicked()
|
||||
{
|
||||
if let Some(addr) = &self.active_peer {
|
||||
let _ = self
|
||||
.network_cmd_tx
|
||||
.send(NetworkCommand::InitDownload(to_draw, addr.clone()));
|
||||
}
|
||||
}
|
||||
}
|
||||
MerkleNode::BigDirectory(node) => {
|
||||
CollapsingHeader::new(format!("📁 (BD) {}", name))
|
||||
|
||||
@@ -134,7 +134,7 @@ pub fn sign_message(crypto_pair: &CryptographicSignature, message: &Vec<u8>) ->
#[cfg(test)]
mod tests {
    use super::*;

    /*
    ///
    /// creates a cryptographic signature
    ///
@@ -144,7 +144,7 @@ mod tests {
        let crypto_pair = CryptographicSignature::new(username);
        let formatted_pubkey = formatPubKey(crypto_pair);
        println!("pubkey : {}", formatted_pubkey);
    }
    }*/

    /*#[test]
    fn signing_message() {

@@ -1,7 +1,15 @@
use rand::{Rng, rng};
use sha2::{Digest, Sha256};
use std::collections::HashMap;
use std::hash::{DefaultHasher, Hash, Hasher};

use std::fs::{File, OpenOptions, create_dir};
use std::io::{self, Write};

use std::env;

use crate::data;

// --- Constants ---
pub const MAX_CHUNK_DATA_SIZE: usize = 1024;
pub const MAX_DIRECTORY_ENTRIES: usize = 16;
@@ -24,9 +32,9 @@ pub enum MerkleNode {
    // 0 to 16 directory entries.
    Directory(DirectoryNode) = 1,
    // list of 2 to 32 hashes pointing to Chunk or Big nodes.
    Big(BigNode) = 3,
    Big(BigNode) = 2,
    // list of 2 to 32 hashes pointing to Directory or BigDirectory nodes.
    BigDirectory(BigDirectoryNode) = 4,
    BigDirectory(BigDirectoryNode) = 3,
}

#[derive(Debug, Clone)]
@@ -39,6 +47,9 @@ impl MerkleTree {
    pub fn new(data: HashMap<NodeHash, MerkleNode>, root: NodeHash) -> MerkleTree {
        MerkleTree { data, root }
    }
    pub fn clear_data(&mut self) {
        self.data.clear();
    }
}

#[derive(Debug, Clone)]
|
||||
bytes
|
||||
}
|
||||
}
|
||||
|
||||
fn hash(data: &[u8]) -> NodeHash {
|
||||
let root_hash = Sha256::digest(&data);
|
||||
println!("root hash: {:?}", root_hash);
|
||||
let res: NodeHash = root_hash.try_into().expect("incorrect size");
|
||||
res
|
||||
/*let mut hasher = DefaultHasher::new();
|
||||
data.hash(&mut hasher);
|
||||
let hash_u64 = hasher.finish();
|
||||
|
||||
let mut hash_array = [0u8; FILENAME_HASH_SIZE];
|
||||
// Simple way to spread a 64-bit hash across 32 bytes for a unique-ish ID
|
||||
for i in 0..8 {
|
||||
hash_array[i] = (hash_u64 >> (i * 8)) as u8;
|
||||
}
|
||||
hash_array // The rest remains 0, satisfying the 32-byte requirement
|
||||
*/
|
||||
}
|
||||
|
||||
fn generate_random_filename() -> [u8; FILENAME_HASH_SIZE] {
|
||||
let mut rng = rand::rng();
|
||||
let mut filename_bytes = [0; FILENAME_HASH_SIZE];
|
||||
|
||||
// Generate a random length for the base name
|
||||
let name_len = rng.random_range(5..21);
|
||||
|
||||
// Generate random alphanumeric characters
|
||||
for i in 0..name_len {
|
||||
let char_code = rng.random_range(97..123); // 'a' through 'z'
|
||||
if i < FILENAME_HASH_SIZE {
|
||||
filename_bytes[i] = char_code as u8;
|
||||
}
|
||||
}
|
||||
|
||||
// Append a common extension
|
||||
let ext = if rng.random_bool(0.5) { ".txt" } else { ".dat" };
|
||||
let ext_bytes = ext.as_bytes();
|
||||
let start_index = name_len.min(FILENAME_HASH_SIZE - ext_bytes.len());
|
||||
if start_index < FILENAME_HASH_SIZE {
|
||||
filename_bytes[start_index..(start_index + ext_bytes.len())].copy_from_slice(ext_bytes);
|
||||
}
|
||||
|
||||
filename_bytes
|
||||
}
|
||||
|
||||
fn generate_random_file_node(
|
||||
storage: &mut HashMap<NodeHash, MerkleNode>,
|
||||
) -> Result<NodeHash, String> {
|
||||
let mut rng = rng();
|
||||
let is_big = rng.random_bool(0.2); // 20% chance of being a big file
|
||||
|
||||
if !is_big {
|
||||
// Generate a simple Chunk Node
|
||||
let node = MerkleNode::Chunk(ChunkNode::new_random());
|
||||
let hash = hash(&node.serialize());
|
||||
storage.insert(hash, node);
|
||||
Ok(hash)
|
||||
} else {
|
||||
// Generate a Big Node (a file composed of chunks)
|
||||
let num_children = rng.random_range(MIN_BIG_CHILDREN..=MAX_BIG_CHILDREN.min(8)); // Limit complexity
|
||||
let mut children_hashes = Vec::with_capacity(num_children);
|
||||
|
||||
for _ in 0..num_children {
|
||||
// Children must be Chunk or Big; for simplicity, we only generate Chunk children here.
|
||||
let chunk_node = MerkleNode::Chunk(ChunkNode::new_random());
|
||||
let chunk_hash = hash(&chunk_node.serialize());
|
||||
storage.insert(chunk_hash, chunk_node);
|
||||
children_hashes.push(chunk_hash);
|
||||
}
|
||||
|
||||
let node = MerkleNode::Big(BigNode::new(children_hashes)?);
|
||||
let hash = hash(&node.serialize());
|
||||
storage.insert(hash, node);
|
||||
Ok(hash)
|
||||
}
|
||||
}
|
||||
|
||||
fn generate_random_directory_node(
|
||||
depth: u32,
|
||||
max_depth: u32,
|
||||
storage: &mut HashMap<NodeHash, MerkleNode>,
|
||||
) -> Result<NodeHash, String> {
|
||||
let mut rng = rng();
|
||||
let current_depth = depth + 1;
|
||||
let is_big_dir = rng.random_bool(0.3) && current_depth < max_depth;
|
||||
|
||||
if !is_big_dir || current_depth >= max_depth {
|
||||
// Generate a simple Directory Node (leaf level directory)
|
||||
let num_entries = rng.random_range(1..=MAX_DIRECTORY_ENTRIES.min(5)); // Limit directory size for testing
|
||||
let mut entries = Vec::with_capacity(num_entries);
|
||||
|
||||
for _ in 0..num_entries {
|
||||
if rng.random_bool(0.7) {
|
||||
// 70% chance of creating a file (Chunk/Big)
|
||||
let file_hash = generate_random_file_node(storage)?;
|
||||
let entry = DirectoryEntry {
|
||||
filename: generate_random_filename(),
|
||||
content_hash: file_hash,
|
||||
};
|
||||
entries.push(entry);
|
||||
} else if current_depth < max_depth {
|
||||
// 30% chance of creating a subdirectory
|
||||
let dir_hash = generate_random_directory_node(current_depth, max_depth, storage)?;
|
||||
|
||||
// Create a basic directory entry name
|
||||
let mut filename_bytes = [0; 32];
|
||||
let subdir_name = format!("dir_{}", current_depth);
|
||||
filename_bytes[..subdir_name.len()].copy_from_slice(subdir_name.as_bytes());
|
||||
|
||||
let entry = DirectoryEntry {
|
||||
filename: filename_bytes,
|
||||
content_hash: dir_hash,
|
||||
};
|
||||
entries.push(entry);
|
||||
}
|
||||
}
|
||||
|
||||
let node = MerkleNode::Directory(DirectoryNode::new(entries)?);
|
||||
let hash = hash(&node.serialize());
|
||||
storage.insert(hash, node);
|
||||
Ok(hash)
|
||||
} else {
|
||||
// Generate a BigDirectory Node (internal directory structure)
|
||||
let num_children = rng.random_range(MIN_BIG_CHILDREN..=MAX_BIG_CHILDREN.min(4)); // Limit children count
|
||||
let mut children = Vec::with_capacity(num_children);
|
||||
|
||||
for _ in 0..num_children {
|
||||
// Children must be Directory or BigDirectory
|
||||
let child_hash = generate_random_directory_node(current_depth, max_depth, storage)?;
|
||||
children.push(child_hash);
|
||||
}
|
||||
|
||||
let node = MerkleNode::BigDirectory(BigDirectoryNode::new(children)?);
|
||||
let hash = hash(&node.serialize());
|
||||
storage.insert(hash, node);
|
||||
Ok(hash)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn generate_random_tree(
|
||||
max_depth: u32,
|
||||
) -> Result<(NodeHash, HashMap<NodeHash, MerkleNode>), String> {
|
||||
let mut storage = HashMap::new();
|
||||
|
||||
// Start tree generation from the root directory at depth 0
|
||||
let root_hash = generate_random_directory_node(0, max_depth, &mut storage)?;
|
||||
|
||||
Ok((root_hash, storage))
|
||||
}
|
||||
|
||||
pub fn generate_base_tree() -> MerkleTree {
|
||||
let mut res = HashMap::new();
|
||||
|
||||
let bob_content = "where is bob".to_string().into_bytes();
|
||||
let alice_content = "alice".to_string().into_bytes();
|
||||
let oscar_content = "oscar is the opponent".to_string().into_bytes();
|
||||
|
||||
let mut children_nodes = Vec::new();
|
||||
for i in 0..10 {
|
||||
let mut i_nodes = Vec::new();
|
||||
for j in 0..10 {
|
||||
let node1 = MerkleNode::Chunk(ChunkNode::new(bob_content.clone()).unwrap());
|
||||
let hash = hash(&node1.serialize());
|
||||
i_nodes.push(hash);
|
||||
res.insert(hash, node1);
|
||||
}
|
||||
let bignode = MerkleNode::Big(BigNode::new(i_nodes).unwrap());
|
||||
let hashbig = hash(&bignode.serialize());
|
||||
children_nodes.push(hashbig);
|
||||
res.insert(hashbig, bignode);
|
||||
}
|
||||
|
||||
let bignode = MerkleNode::Big(BigNode::new(children_nodes).unwrap());
|
||||
let hashbig = hash(&bignode.serialize());
|
||||
|
||||
let node1 = MerkleNode::Chunk(ChunkNode::new(bob_content).unwrap());
|
||||
let hash1 = hash(&node1.serialize());
|
||||
|
||||
let node2 = MerkleNode::Chunk(ChunkNode::new(alice_content).unwrap());
|
||||
let hash2 = hash(&node2.serialize());
|
||||
|
||||
//res.insert(hash1, node1);
|
||||
//res.insert(hash2, node2);
|
||||
res.insert(hashbig, bignode);
|
||||
|
||||
let node3 = MerkleNode::Chunk(ChunkNode::new(oscar_content).unwrap());
|
||||
let hash3 = hash(&node3.serialize());
|
||||
|
||||
//res.insert(hash3, node3);
|
||||
|
||||
let dir1 = MerkleNode::Directory(DirectoryNode {
|
||||
entries: [DirectoryEntry {
|
||||
filename: generate_random_filename(),
|
||||
content_hash: hash3,
|
||||
}]
|
||||
.to_vec(),
|
||||
});
|
||||
let hash_dir1 = hash(&dir1.serialize());
|
||||
|
||||
//res.insert(hash_dir1, dir1);
|
||||
|
||||
let root = MerkleNode::Directory(DirectoryNode {
|
||||
entries: [
|
||||
DirectoryEntry {
|
||||
filename: generate_random_filename(),
|
||||
content_hash: hashbig,
|
||||
},
|
||||
/*DirectoryEntry {
|
||||
filename: generate_random_filename(),
|
||||
content_hash: hash2,
|
||||
},
|
||||
DirectoryEntry {
|
||||
filename: generate_random_filename(),
|
||||
content_hash: hash_dir1,
|
||||
},*/
|
||||
]
|
||||
.to_vec(),
|
||||
});
|
||||
|
||||
let root_hash = Sha256::digest(&root.serialize());
|
||||
println!("root hash: {:?}", root_hash);
|
||||
res.insert(root_hash.try_into().expect("incorrect size"), root);
|
||||
|
||||
MerkleTree::new(res, root_hash.try_into().expect("incorrect size"))
|
||||
}
|
||||
|
||||
pub fn node_to_file(tree: &MerkleTree, node: &MerkleNode, path: String, i: u8) {
|
||||
match node.clone() {
|
||||
MerkleNode::Directory(dir) => {
|
||||
if i != 0 {
|
||||
let new_path = format!("{}/fold_{}", path.clone(), i);
|
||||
match create_dir(new_path.clone()) {
|
||||
Ok(_) => println!("Directory created successfully!"),
|
||||
Err(e) => println!("Failed to create directory: {}", e),
|
||||
}
|
||||
}
|
||||
for entry in dir.entries {
|
||||
// creer un fichier pour chaque entry
|
||||
if let Ok(filename_str) = String::from_utf8(entry.filename.to_vec()) {
|
||||
let new_name = format!("{}{}", path.clone(), remove_null_bytes(&filename_str));
|
||||
|
||||
println!("new_name: {}", new_name);
|
||||
let file = OpenOptions::new()
|
||||
.append(true)
|
||||
.create(true)
|
||||
.open(new_name.clone());
|
||||
match file {
|
||||
Ok(mut fileok) => {
|
||||
if let Some(current) = tree.data.get(&entry.content_hash) {
|
||||
big_or_chunk_to_file(&tree, ¤t, &mut fileok);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("error creaation file: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
MerkleNode::BigDirectory(bigdir) => {
|
||||
for entry in bigdir.children_hashes.iter() {
|
||||
if let Some(current) = tree.data.get(entry) {
|
||||
node_to_file(tree, current, path.clone(), i + 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
eprintln!("invalid type of dir");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn remove_null_bytes(input: &str) -> String {
|
||||
input.chars().filter(|&c| c != '\0').collect()
|
||||
}
|
||||
|
||||
pub fn big_or_chunk_to_file(tree: &MerkleTree, node: &MerkleNode, file: &mut File) {
|
||||
match node {
|
||||
MerkleNode::Big(big) => {
|
||||
for entry in big.children_hashes.iter() {
|
||||
if let Some(current) = tree.data.get(entry) {
|
||||
big_or_chunk_to_file(tree, current, file);
|
||||
}
|
||||
}
|
||||
}
|
||||
MerkleNode::Chunk(chunk) => {
|
||||
println!("wrote data");
|
||||
let _ = file.write_all(&chunk.data);
|
||||
}
|
||||
_ => {
|
||||
println!("invalid type of file");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
///
|
||||
/// creates a cryptographic signature
|
||||
///
|
||||
#[test]
|
||||
fn test_saving_tree() {
|
||||
if let Ok(current_dir) = env::current_dir() {
|
||||
println!("Current working directory: {:?}", current_dir);
|
||||
}
|
||||
|
||||
println!("--------- tree test starts ------------");
|
||||
|
||||
match create_dir("../Download/") {
|
||||
Ok(_) => println!("Directory created successfully!"),
|
||||
Err(e) => println!("Failed to create directory: {}", e),
|
||||
}
|
||||
let tree = generate_base_tree();
|
||||
|
||||
println!("--------- test tree created ------------");
|
||||
if let Some(root_node) = tree.data.get(&tree.root) {
|
||||
node_to_file(&tree, root_node, "../Download/".to_string(), 0);
|
||||
}
|
||||
}
|
||||
|
||||
/*#[test]
|
||||
fn signing_message() {
|
||||
let username = String::from("gamixtreize");
|
||||
let crypto_pair = CryptographicSignature::new(username.clone());
|
||||
let handshake = HandshakeMessage::hello(0, 12, username);
|
||||
let ser = handshake.serialize();
|
||||
let signed_message = sign_message(&crypto_pair, &ser);
|
||||
println!("unsigned_message: {:?}", ser);
|
||||
println!("signed_message: {:?}", signed_message);
|
||||
}*/
|
||||
}
|
||||
|
||||
@@ -11,9 +11,9 @@ pub fn parse_received_datum(
|
||||
datum_length: usize,
|
||||
) -> Option<([u8; 32], MerkleNode)> {
|
||||
let hash_name: [u8; 32] = recevied_datum[..32].try_into().expect("error");
|
||||
let sigstart = datum_length - 64;
|
||||
let value = &recevied_datum[32..sigstart];
|
||||
let value = &recevied_datum[32..datum_length];
|
||||
let value_slice = value.to_vec();
|
||||
println!("valueslice: {:?}, {}", value_slice, value_slice.len());
|
||||
let datum_type = value_slice[0];
|
||||
match datum_type {
|
||||
CHUNK => Some((
|
||||
@@ -21,14 +21,17 @@ pub fn parse_received_datum(
|
||||
MerkleNode::Chunk(crate::ChunkNode { data: value_slice }),
|
||||
)),
|
||||
DIRECTORY => {
|
||||
let nb_entries = value_slice[1];
|
||||
let mut dir_entries = Vec::new();
|
||||
let mut offset = 1 as usize;
|
||||
for i in 0..nb_entries {
|
||||
offset = (offset as u8 + 64 * i) as usize;
|
||||
let name = &recevied_datum[offset..offset + 32];
|
||||
for i in 0..((value_slice.len() - 1) / 64) as u8 {
|
||||
offset = (1 + 64 * i as usize) as usize;
|
||||
println!("offset:{}, i:{}", offset, i);
|
||||
let name = &value_slice[offset..offset + 32];
|
||||
let mut hash = [0u8; 32];
|
||||
hash.copy_from_slice(&recevied_datum[offset + 32..offset + 64]);
|
||||
hash.copy_from_slice(&value_slice[offset + 32..offset + 64]);
|
||||
let dp_name = String::from_utf8(name.to_vec()).expect("err");
|
||||
println!("name:{}", dp_name);
|
||||
|
||||
// envoyer un datum request
|
||||
dir_entries.push(DirectoryEntry {
|
||||
filename: name.try_into().expect("incorrect size"),
|
||||
@@ -46,6 +49,7 @@ pub fn parse_received_datum(
|
||||
}
|
||||
}
|
||||
BIG => {
|
||||
println!("its a BIG bro");
|
||||
let chlidren: Vec<NodeHash> = Vec::new();
|
||||
Some((
|
||||
hash_name,
|
||||
@@ -53,28 +57,20 @@ pub fn parse_received_datum(
|
||||
children_hashes: chlidren,
|
||||
}),
|
||||
))
|
||||
/*let chlidren: Vec<NodeHash> = Vec::new();
|
||||
tree.data.insert(
|
||||
hash_name,
|
||||
MerkleNode::Big(crate::BigNode {
|
||||
children_hashes: chlidren,
|
||||
}),
|
||||
);*/
|
||||
}
|
||||
BIGDIRECTORY => {
|
||||
let nb_entries = value_slice[1];
|
||||
let mut dir_entries = Vec::new();
|
||||
let mut bigdir_entries: Vec<NodeHash> = Vec::new();
|
||||
let mut offset = 1 as usize;
|
||||
for i in 0..nb_entries {
|
||||
offset = (offset as u8 + 64 * i) as usize;
|
||||
let name = &recevied_datum[offset..offset + 32];
|
||||
let mut hash = [0u8; 32];
|
||||
hash.copy_from_slice(&recevied_datum[offset + 32..offset + 64]);
|
||||
for i in 0..((value_slice.len() - 1) / 32) as u8 {
|
||||
offset = (1 + 32 * i as usize) as usize;
|
||||
println!("offset:{}, i:{}", offset, i);
|
||||
let hash = &value_slice[offset..offset + 32];
|
||||
|
||||
// envoyer un datum request
|
||||
dir_entries.push(hash);
|
||||
bigdir_entries.push(hash.try_into().expect("incorrect size"));
|
||||
}
|
||||
|
||||
let current = BigDirectoryNode::new(dir_entries);
|
||||
let current = BigDirectoryNode::new(bigdir_entries);
|
||||
match current {
|
||||
Ok(current_node) => Some((hash_name, MerkleNode::BigDirectory(current_node))),
|
||||
Err(e) => {
|
||||
|
||||
@@ -13,18 +13,23 @@ mod threads_handling;
mod timestamp;

use crate::fetchsocketaddresserror::FetchSocketAddressError;
use crate::messages_structure::ROOTREPLY;
use crate::peers_refresh::*;
use crate::timestamp::Timestamp;
use crate::{
    cryptographic_signature::CryptographicSignature,
    message_handling::EventType,
    messages_channels::{MultipleSenders, start_receving_thread, start_retry_thread},
    messages_structure::{NATTRAVERSALREQUEST, PING, ROOTREQUEST, construct_message},
    messages_structure::{
        DATUM, DATUMREQUEST, NATTRAVERSALREQUEST, NATTRAVERSALREQUEST2, NODATUM, PING, ROOTREQUEST,
        construct_message,
    },
    peers_refresh::HandshakeHistory,
    registration::{parse_addresses, perform_handshake, register_with_the_server},
    server_communication::{generate_id, get_peer_list},
    threads_handling::Worker,
};
use std::collections::HashSet;
use std::{
    io::Error,
    net::{IpAddr, Ipv4Addr, UdpSocket},
@@ -176,7 +181,11 @@ pub enum NetworkCommand {
    Disconnect(),
    ResetServerPeer(),
    Discover(String, String, String),
    GetChildren(String, String),
    GetChildren([u8; 32], String, bool),
    SendDatum(MerkleNode, [u8; 32], String),
    SendNoDatum(Vec<u8>, String),
    SendRootReply(Vec<u8>, String),
    InitDownload([u8; 32], String),
    // ...
}

@@ -189,11 +198,14 @@ pub enum NetworkEvent {
    Success(String),
    PeerConnected(String),
    PeerListUpdated(Vec<(String, bool)>),
    FileTreeReceived([u8; 32], MerkleNode), // peer_id, content
    DataReceived(String, MerkleNode),
    FileTreeReceived([u8; 32], MerkleNode, String), // peer_id, content
    DataReceived([u8; 32], MerkleNode, String),
    FileTreeRootReceived(String, NodeHash),
    HandshakeFailed(),
    ServerHandshakeFailed(String),
    DatumRequest([u8; 32], String),
    RootRequest(String),
    InitDownload([u8; 32], String),
    // ...
}

@@ -234,6 +246,81 @@ pub fn start_p2p_executor(
|
||||
// Check for commands from the GUI
|
||||
if let Ok(cmd) = cmd_rx.try_recv() {
|
||||
match cmd {
|
||||
NetworkCommand::InitDownload(hash, ip) => {
|
||||
if let Some(sd) = shared_data.as_ref() {
|
||||
if let Some(res) = sd.handshake_peers.get_peer_info_username(ip) {
|
||||
let _ = event_tx
|
||||
.send(NetworkEvent::InitDownload(hash, res.ip.to_string()));
|
||||
}
|
||||
}
|
||||
}
|
||||
NetworkCommand::SendRootReply(node_hash, addr) => {
|
||||
if let Some(sd) = shared_data.as_mut() {
|
||||
let mut payload = Vec::new();
|
||||
payload.extend_from_slice(&node_hash);
|
||||
let new_id = generate_id();
|
||||
let message =
|
||||
construct_message(ROOTREPLY, payload, new_id, sd.cryptopair_ref());
|
||||
|
||||
match message {
|
||||
None => {}
|
||||
Some(resp_msg) => {
|
||||
println!("msg_sent:{:?}", resp_msg);
|
||||
sd.senders_ref().send_dispatch(
|
||||
resp_msg,
|
||||
addr.clone(),
|
||||
false,
|
||||
sd.messages_list(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
NetworkCommand::SendNoDatum(node_hash, addr) => {
|
||||
if let Some(sd) = shared_data.as_mut() {
|
||||
let mut payload = Vec::new();
|
||||
payload.extend_from_slice(&node_hash);
|
||||
let new_id = generate_id();
|
||||
let message =
|
||||
construct_message(NODATUM, payload, new_id, sd.cryptopair_ref());
|
||||
|
||||
match message {
|
||||
None => {}
|
||||
Some(resp_msg) => {
|
||||
println!("msg_sent:{:?}", resp_msg);
|
||||
sd.senders_ref().send_dispatch(
|
||||
resp_msg,
|
||||
addr.clone(),
|
||||
false,
|
||||
sd.messages_list(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
NetworkCommand::SendDatum(merklennode, node_hash, addr) => {
|
||||
if let Some(sd) = shared_data.as_mut() {
|
||||
let mut payload = Vec::new();
|
||||
payload.extend_from_slice(&node_hash);
|
||||
payload.extend_from_slice(&merklennode.serialize());
|
||||
let new_id = generate_id();
|
||||
let message =
|
||||
construct_message(DATUM, payload, new_id, sd.cryptopair_ref());
|
||||
|
||||
match message {
|
||||
None => {}
|
||||
Some(resp_msg) => {
|
||||
println!("msg_sent:{:?}", resp_msg);
|
||||
sd.senders_ref().send_dispatch(
|
||||
resp_msg,
|
||||
addr.clone(),
|
||||
false,
|
||||
sd.messages_list(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
NetworkCommand::ServerHandshake(username, ip) => {
|
||||
println!("server handshake called");
|
||||
if let Some(sd) = shared_data.as_mut() {
|
||||
@@ -319,8 +406,41 @@ pub fn start_p2p_executor(
|
||||
println!("no shared data");
|
||||
}
|
||||
}
|
||||
NetworkCommand::GetChildren(username, hash) => {
|
||||
// envoie un datum request au peer
|
||||
NetworkCommand::GetChildren(hash, ip, is_file) => {
|
||||
if let Some(sd) = shared_data.as_ref() {
|
||||
let mut payload = Vec::new();
|
||||
payload.extend_from_slice(&hash);
|
||||
let new_id = generate_id();
|
||||
let datumreqest = construct_message(
|
||||
DATUMREQUEST,
|
||||
payload,
|
||||
new_id,
|
||||
sd.cryptopair_ref(),
|
||||
);
|
||||
match datumreqest {
|
||||
None => {}
|
||||
Some(resp_msg) => {
|
||||
if is_file {
|
||||
sd.add_message(new_id, EventType::DatumRequestBig);
|
||||
} else {
|
||||
sd.add_message(new_id, EventType::DatumRequest);
|
||||
}
|
||||
println!("msg_sent:{:?}", resp_msg);
|
||||
sd.senders_ref().add_message_to_retry_queue(
|
||||
resp_msg.clone(),
|
||||
ip.clone(),
|
||||
false,
|
||||
);
|
||||
|
||||
sd.senders_ref().send_dispatch(
|
||||
resp_msg,
|
||||
ip.clone(),
|
||||
false,
|
||||
sd.messages_list(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
NetworkCommand::RequestDirectoryContent(_, _) => {
|
||||
println!("[Network] RequestDirectoryContent() called");
|
||||
|
||||
@@ -1,15 +1,10 @@
|
||||
use tokio::sync::oneshot;
|
||||
|
||||
use crate::{
|
||||
NetworkEvent, NodeHash, P2PSharedData,
|
||||
cryptographic_signature::{
|
||||
CryptographicSignature, get_peer_key, sign_message, verify_signature,
|
||||
},
|
||||
NetworkEvent, NodeHash,
|
||||
cryptographic_signature::{CryptographicSignature, get_peer_key, verify_signature},
|
||||
datum_parsing::parse_received_datum,
|
||||
messages_channels::MultipleSenders,
|
||||
messages_structure::construct_message,
|
||||
peers_refresh::HandshakeHistory,
|
||||
registration,
|
||||
server_communication::generate_id,
|
||||
timestamp::Timestamp,
|
||||
};
|
||||
@@ -32,6 +27,7 @@ pub enum EventType {
|
||||
Ping,
|
||||
NatTraversal,
|
||||
DatumRequest,
|
||||
DatumRequestBig,
|
||||
Unknown,
|
||||
}
|
||||
|
||||
@@ -45,6 +41,7 @@ impl EventType {
|
||||
EventType::NatTraversal => "NatTraversal".to_owned(),
|
||||
EventType::DatumRequest => "DatumRequest".to_owned(),
|
||||
EventType::Unknown => "Unknown".to_owned(),
|
||||
EventType::DatumRequestBig => "DatumRequestBig".to_owned(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -461,8 +458,34 @@ pub fn parse_message(
|
||||
parse_received_datum(received_datum.to_vec(), received_length as usize);
|
||||
match parsed_node {
|
||||
Some(tuple) => {
|
||||
let _ =
|
||||
cmd_tx.send(NetworkEvent::FileTreeReceived(tuple.0, tuple.1));
|
||||
let _ = cmd_tx.send(NetworkEvent::FileTreeReceived(
|
||||
tuple.0,
|
||||
tuple.1,
|
||||
ip.to_string(),
|
||||
));
|
||||
}
|
||||
None => {}
|
||||
}
|
||||
}
|
||||
EventType::DatumRequestBig => {
|
||||
let _ = &guard.remove_entry(&id);
|
||||
println!("message {} retiré de la liste", id);
|
||||
let received_length = u16::from_be_bytes(
|
||||
received_message[TYPE..LENGTH]
|
||||
.try_into()
|
||||
.expect("incorrect size"),
|
||||
);
|
||||
let received_datum = &received_message[LENGTH..];
|
||||
let parsed_node =
|
||||
parse_received_datum(received_datum.to_vec(), received_length as usize);
|
||||
match parsed_node {
|
||||
Some(tuple) => {
|
||||
let _ = cmd_tx.send(NetworkEvent::DataReceived(
|
||||
tuple.0,
|
||||
tuple.1,
|
||||
ip.to_string(),
|
||||
));
|
||||
println!("datareceived event sent");
|
||||
}
|
||||
None => {}
|
||||
}
|
||||
@@ -472,6 +495,22 @@ pub fn parse_message(
|
||||
None => {}
|
||||
}
|
||||
}
|
||||
ROOTREQUEST => {
|
||||
println!("root request received");
|
||||
let _ = cmd_tx.send(NetworkEvent::RootRequest(ip.to_string()));
|
||||
}
|
||||
DATUMREQUEST => {
|
||||
let received_length = u16::from_be_bytes(
|
||||
received_message[TYPE..LENGTH]
|
||||
.try_into()
|
||||
.expect("incorrect size"),
|
||||
);
|
||||
let received_hash = &received_message[LENGTH..LENGTH + received_length as usize];
|
||||
let _ = cmd_tx.send(NetworkEvent::DatumRequest(
|
||||
received_hash.try_into().expect("incorrect size"),
|
||||
ip.to_string(),
|
||||
));
|
||||
}
|
||||
_ => return None,
|
||||
};
|
||||
constructed_message
|
||||
|
||||
@@ -224,7 +224,7 @@ impl HandshakeMessage {
mod tests {
    // Note this useful idiom: importing names from outer (for mod tests) scope.
    use super::*;

    /*
    /// creates an handshake message
    #[tokio::test]
    async fn creating_handshake_msg() {
@@ -242,5 +242,5 @@ mod tests {
        let parsed = HandshakeMessage::parse(ser);
        handshake.display();
        parsed.display();
    }
}*/
}

@@ -137,7 +137,7 @@ pub fn update_handshake(
            }
        }
        drop(guard);
        thread::sleep(Duration::from_secs(240));
        thread::sleep(Duration::from_secs(60));
    }
});
Worker::spawn(handle, crate::threads_handling::WorkerType::PING)

BIN  project.pdf (binary file not shown)
todo.md: 25 lines changed
@@ -1,24 +1,11 @@
# Todo

## bugfix

- add hello and nat to the exp backoff OK
- peers that have no address OK
- check the peer refresh

- setting in gui to act as a relay

- make hello and helloreply messages set the first extension bit to announce that peer is available for nat traversal
- implement actual nat traversal requests
- implement nat traversal:
  - if hello/helloreply doesn't work with a peer, find a peer that supports nat traversal (server in priority) then begin protocol

## features:

search a peer's files
download files
choose a folder to share
choose the number of channels
- offer files
- download files
- receiver threads
- ask for nat traversal

## other

@@ -26,6 +13,7 @@ socket ipv6

# DONE

search a peer's files OK
- choose a username OK
- GET request to the URI /peers/ OK
- generation of the cryptographic key OK
@@ -44,3 +32,6 @@ socket ipv6
- datum/nodatum and datum structures
- nattraversal 1 and 2 structures
- chunk, directory, big, bigdirectory structures
- add hello and nat to the exp backoff OK
- peers that have no address OK
- check the peer refresh OK