This commit is contained in:
TIBERGHIEN corentin
2026-01-21 02:45:48 +01:00
parent dacedd1ceb
commit 8b2ab4861b
10 changed files with 351 additions and 460 deletions

View File

@@ -571,7 +571,7 @@ impl P2PClientApp {
.enabled(true) .enabled(true)
.show(ui, |ui| { .show(ui, |ui| {
for child in &node.children_hashes { for child in &node.children_hashes {
self.draw_file_node(ui, child.content_hash, tree, depth + 1, None); self.draw_file_node(ui, child.clone(), tree, depth + 1, None);
} }
}); });
} }

View File

@@ -3,51 +3,12 @@ use std::collections::HashMap;
use std::hash::{DefaultHasher, Hash, Hasher}; use std::hash::{DefaultHasher, Hash, Hasher};
// --- Constants --- // --- Constants ---
const MAX_CHUNK_DATA_SIZE: usize = 1024; pub const MAX_CHUNK_DATA_SIZE: usize = 1024;
const MAX_DIRECTORY_ENTRIES: usize = 16; pub const MAX_DIRECTORY_ENTRIES: usize = 16;
const MAX_BIG_CHILDREN: usize = 32; pub const MAX_BIG_CHILDREN: usize = 32;
const MIN_BIG_CHILDREN: usize = 2; pub const MIN_BIG_CHILDREN: usize = 2;
const FILENAME_HASH_SIZE: usize = 32; pub const FILENAME_HASH_SIZE: usize = 32;
const DIRECTORY_ENTRY_SIZE: usize = FILENAME_HASH_SIZE * 2; // 64 bytes pub const DIRECTORY_ENTRY_SIZE: usize = FILENAME_HASH_SIZE * 2; // 64 bytes
fn hash(data: &[u8]) -> NodeHash {
let mut hasher = DefaultHasher::new();
data.hash(&mut hasher);
let hash_u64 = hasher.finish();
let mut hash_array = [0u8; FILENAME_HASH_SIZE];
// Simple way to spread a 64-bit hash across 32 bytes for a unique-ish ID
for i in 0..8 {
hash_array[i] = (hash_u64 >> (i * 8)) as u8;
}
hash_array // The rest remains 0, satisfying the 32-byte requirement
}
fn generate_random_filename() -> [u8; FILENAME_HASH_SIZE] {
let mut rng = rand::rng();
let mut filename_bytes = [0; FILENAME_HASH_SIZE];
// Generate a random length for the base name
let name_len = rng.random_range(5..21);
// Generate random alphanumeric characters
for i in 0..name_len {
let char_code = rng.random_range(97..123); // 'a' through 'z'
if i < FILENAME_HASH_SIZE {
filename_bytes[i] = char_code as u8;
}
}
// Append a common extension
let ext = if rng.random_bool(0.5) { ".txt" } else { ".dat" };
let ext_bytes = ext.as_bytes();
let start_index = name_len.min(FILENAME_HASH_SIZE - ext_bytes.len());
if start_index < FILENAME_HASH_SIZE {
filename_bytes[start_index..(start_index + ext_bytes.len())].copy_from_slice(ext_bytes);
}
filename_bytes
}
pub type NodeHash = [u8; FILENAME_HASH_SIZE]; pub type NodeHash = [u8; FILENAME_HASH_SIZE];
@@ -80,100 +41,6 @@ impl MerkleTree {
} }
} }
/*fn generate_random_file_node(
storage: &mut HashMap<NodeHash, MerkleNode>,
) -> Result<NodeHash, String> {
let mut rng = rng();
let is_big = rng.random_bool(0.2); // 20% chance of being a big file
if !is_big {
// Generate a simple Chunk Node
let node = MerkleNode::Chunk(ChunkNode::new_random());
let hash = hash(&node.serialize());
storage.insert(hash, node);
Ok(hash)
} else {
// Generate a Big Node (a file composed of chunks)
let num_children = rng.random_range(MIN_BIG_CHILDREN..=MAX_BIG_CHILDREN.min(8)); // Limit complexity
let mut children_hashes = Vec::with_capacity(num_children);
for _ in 0..num_children {
// Children must be Chunk or Big; for simplicity, we only generate Chunk children here.
let chunk_node = MerkleNode::Chunk(ChunkNode::new_random());
let chunk_hash = hash(&chunk_node.serialize());
storage.insert(chunk_hash, chunk_node);
children_hashes.push(chunk_hash);
}
let node = MerkleNode::Big(BigNode::new(children_hashes)?);
let hash = hash(&node.serialize());
storage.insert(hash, node);
Ok(hash)
}
}*/
/*fn generate_random_directory_node(
depth: u32,
max_depth: u32,
storage: &mut HashMap<NodeHash, MerkleNode>,
) -> Result<NodeHash, String> {
let mut rng = rng();
let current_depth = depth + 1;
let is_big_dir = rng.random_bool(0.3) && current_depth < max_depth;
if !is_big_dir || current_depth >= max_depth {
// Generate a simple Directory Node (leaf level directory)
let num_entries = rng.random_range(1..=MAX_DIRECTORY_ENTRIES.min(5)); // Limit directory size for testing
let mut entries = Vec::with_capacity(num_entries);
for _ in 0..num_entries {
if rng.random_bool(0.7) {
// 70% chance of creating a file (Chunk/Big)
let file_hash = generate_random_file_node(storage)?;
let entry = DirectoryEntry {
filename: generate_random_filename(),
content_hash: file_hash,
};
entries.push(entry);
} else if current_depth < max_depth {
// 30% chance of creating a subdirectory
let dir_hash = generate_random_directory_node(current_depth, max_depth, storage)?;
// Create a basic directory entry name
let mut filename_bytes = [0; 32];
let subdir_name = format!("dir_{}", current_depth);
filename_bytes[..subdir_name.len()].copy_from_slice(subdir_name.as_bytes());
let entry = DirectoryEntry {
filename: filename_bytes,
content_hash: dir_hash,
};
entries.push(entry);
}
}
let node = MerkleNode::Directory(DirectoryNode::new(entries)?);
let hash = hash(&node.serialize());
storage.insert(hash, node);
Ok(hash)
} else {
// Generate a BigDirectory Node (internal directory structure)
let num_children = rng.random_range(MIN_BIG_CHILDREN..=MAX_BIG_CHILDREN.min(4)); // Limit children count
let mut children = Vec::with_capacity(num_children);
for _ in 0..num_children {
// Children must be Directory or BigDirectory
let child_hash = generate_random_directory_node(current_depth, max_depth, storage)?;
children.push(child_hash);
}
let node = MerkleNode::BigDirectory(BigDirectoryNode::new(children)?);
let hash = hash(&node.serialize());
storage.insert(hash, node);
Ok(hash)
}
}*/
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct ChunkNode { pub struct ChunkNode {
pub data: Vec<u8>, pub data: Vec<u8>,
@@ -208,7 +75,7 @@ impl ChunkNode {
// Helper struct // Helper struct
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct DirectoryEntry { pub struct DirectoryEntry {
pub filename: Vec<u8>, pub filename: [u8; FILENAME_HASH_SIZE],
pub content_hash: NodeHash, pub content_hash: NodeHash,
} }
@@ -240,7 +107,7 @@ pub struct BigNode {
} }
impl BigNode { impl BigNode {
/*pub fn new(children_hashes: Vec<NodeHash>) -> Result<Self, String> { pub fn new(children_hashes: Vec<NodeHash>) -> Result<Self, String> {
let n = children_hashes.len(); let n = children_hashes.len();
if n < MIN_BIG_CHILDREN || n > MAX_BIG_CHILDREN { if n < MIN_BIG_CHILDREN || n > MAX_BIG_CHILDREN {
return Err(format!( return Err(format!(
@@ -249,17 +116,17 @@ impl BigNode {
)); ));
} }
Ok(BigNode { children_hashes }) Ok(BigNode { children_hashes })
}*/ }
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct BigDirectoryNode { pub struct BigDirectoryNode {
//pub children_hashes: Vec<NodeHash>, pub children_hashes: Vec<NodeHash>,
pub children_hashes: Vec<DirectoryEntry>, // pub children_hashes: Vec<DirectoryEntry>,
} }
impl BigDirectoryNode { impl BigDirectoryNode {
/*pub fn new(children_hashes: Vec<NodeHash>) -> Result<Self, String> { pub fn new(children_hashes: Vec<NodeHash>) -> Result<Self, String> {
let n = children_hashes.len(); let n = children_hashes.len();
if n < MIN_BIG_CHILDREN || n > MAX_BIG_CHILDREN { if n < MIN_BIG_CHILDREN || n > MAX_BIG_CHILDREN {
return Err(format!( return Err(format!(
@@ -268,14 +135,6 @@ impl BigDirectoryNode {
)); ));
} }
Ok(BigDirectoryNode { children_hashes }) Ok(BigDirectoryNode { children_hashes })
}*/
pub fn new(entries: Vec<DirectoryEntry>) -> Result<Self, String> {
if entries.len() > MAX_DIRECTORY_ENTRIES {
return Err(format!("Directory exceeds {} bytes", entries.len()));
}
Ok(BigDirectoryNode {
children_hashes: entries,
})
} }
} }
@@ -310,73 +169,10 @@ impl MerkleNode {
} }
MerkleNode::BigDirectory(node) => { MerkleNode::BigDirectory(node) => {
for hash in &node.children_hashes { for hash in &node.children_hashes {
bytes.extend_from_slice(&hash.content_hash); bytes.extend_from_slice(hash);
} }
} }
} }
bytes bytes
} }
/*pub fn generate_random_tree(
max_depth: u32,
) -> Result<(NodeHash, HashMap<NodeHash, MerkleNode>), String> {
let mut storage = HashMap::new();
// Start tree generation from the root directory at depth 0
let root_hash = generate_random_directory_node(0, max_depth, &mut storage)?;
Ok((root_hash, storage))
}*/
/*pub fn generate_base_tree() -> (NodeHash, HashMap<NodeHash, MerkleNode>) {
let mut res = HashMap::new();
let node1 = MerkleNode::Chunk(ChunkNode::new_random());
let hash1 = hash(&node1.serialize());
let node2 = MerkleNode::Chunk(ChunkNode::new_random());
let hash2 = hash(&node2.serialize());
res.insert(hash1, node1);
res.insert(hash2, node2);
let node3 = MerkleNode::Chunk(ChunkNode::new_random());
let hash3 = hash(&node3.serialize());
res.insert(hash3, node3);
let dir1 = MerkleNode::Directory(DirectoryNode {
entries: [DirectoryEntry {
filename: generate_random_filename(),
content_hash: hash3,
}]
.to_vec(),
});
let hash_dir1 = hash(&dir1.serialize());
res.insert(hash_dir1, dir1);
let root = MerkleNode::Directory(DirectoryNode {
entries: [
DirectoryEntry {
filename: generate_random_filename(),
content_hash: hash1,
},
DirectoryEntry {
filename: generate_random_filename(),
content_hash: hash2,
},
DirectoryEntry {
filename: generate_random_filename(),
content_hash: hash_dir1,
},
]
.to_vec(),
});
let root_hash = hash(&root.serialize());
res.insert(root_hash, root);
(root_hash, res)
}*/
} }

View File

@@ -0,0 +1,200 @@
use crate::data::*;
use rand::{Rng, rng};
use std::collections::HashMap;
use std::hash::{DefaultHasher, Hash, Hasher};
/// Hashes `data` into a 32-byte `NodeHash` for test/demo tree generation.
///
/// Uses std's `DefaultHasher` (64-bit SipHash variant — NOT cryptographic),
/// spreading the 64-bit result little-endian across the first 8 bytes of the
/// array; the remaining 24 bytes stay zero. This yields a unique-ish ID that
/// satisfies the 32-byte `NodeHash` requirement, but it must not be relied on
/// where collision resistance matters.
fn hash(data: &[u8]) -> NodeHash {
    let mut hasher = DefaultHasher::new();
    data.hash(&mut hasher);
    let hash_u64 = hasher.finish();
    let mut hash_array = [0u8; FILENAME_HASH_SIZE];
    // Little-endian spread of the 64-bit hash; equivalent to the former
    // manual `(hash_u64 >> (i * 8)) as u8` loop, but clearer and checked once.
    hash_array[..8].copy_from_slice(&hash_u64.to_le_bytes());
    hash_array
}
/// Generates a random human-readable filename packed into a fixed
/// `FILENAME_HASH_SIZE`-byte array, zero-padded on the right.
///
/// The base name is 5..=20 lowercase ASCII letters, followed by ".txt" or
/// ".dat" (50/50). Maximum total length is 20 + 4 = 24 bytes, comfortably
/// below the 32-byte capacity, so no truncation can ever occur — the former
/// in-loop and pre-copy bounds guards were dead code and have been removed.
fn generate_random_filename() -> [u8; FILENAME_HASH_SIZE] {
    let mut rng = rand::rng();
    let mut filename_bytes = [0u8; FILENAME_HASH_SIZE];

    // Random base name length, then random letters 'a'..='z'.
    let name_len = rng.random_range(5..21);
    for byte in filename_bytes.iter_mut().take(name_len) {
        *byte = rng.random_range(97..123) as u8; // 'a' through 'z'
    }

    // Append a common extension directly after the base name. `min` is kept
    // purely as a defensive clamp; with the current constants it is a no-op.
    let ext = if rng.random_bool(0.5) { ".txt" } else { ".dat" };
    let ext_bytes = ext.as_bytes();
    let start_index = name_len.min(FILENAME_HASH_SIZE - ext_bytes.len());
    filename_bytes[start_index..start_index + ext_bytes.len()].copy_from_slice(ext_bytes);

    filename_bytes
}
/// Creates a random file node — 80% a plain `Chunk`, 20% a `Big` node made
/// of freshly generated chunk children — inserts every created node into
/// `storage`, and returns the hash of the file's root node.
fn generate_random_file_node(
    storage: &mut HashMap<NodeHash, MerkleNode>,
) -> Result<NodeHash, String> {
    let mut rng = rng();
    let is_big = rng.random_bool(0.2); // 20% chance of being a big file

    if !is_big {
        // Simple single-chunk file.
        let chunk = MerkleNode::Chunk(ChunkNode::new_random());
        let chunk_hash = hash(&chunk.serialize());
        storage.insert(chunk_hash, chunk);
        return Ok(chunk_hash);
    }

    // Big file: a BigNode over several chunk children. Children could also
    // be Big nodes, but chunks keep the generated test data simple.
    let child_count = rng.random_range(MIN_BIG_CHILDREN..=MAX_BIG_CHILDREN.min(8));
    let children: Vec<NodeHash> = (0..child_count)
        .map(|_| {
            let chunk = MerkleNode::Chunk(ChunkNode::new_random());
            let chunk_hash = hash(&chunk.serialize());
            storage.insert(chunk_hash, chunk);
            chunk_hash
        })
        .collect();

    let big = MerkleNode::Big(BigNode::new(children)?);
    let big_hash = hash(&big.serialize());
    storage.insert(big_hash, big);
    Ok(big_hash)
}
fn generate_random_directory_node(
depth: u32,
max_depth: u32,
storage: &mut HashMap<NodeHash, MerkleNode>,
) -> Result<NodeHash, String> {
let mut rng = rng();
let current_depth = depth + 1;
let is_big_dir = rng.random_bool(0.3) && current_depth < max_depth;
if !is_big_dir || current_depth >= max_depth {
// Generate a simple Directory Node (leaf level directory)
let num_entries = rng.random_range(1..=MAX_DIRECTORY_ENTRIES.min(5)); // Limit directory size for testing
let mut entries = Vec::with_capacity(num_entries);
for _ in 0..num_entries {
if rng.random_bool(0.7) {
// 70% chance of creating a file (Chunk/Big)
let file_hash = generate_random_file_node(storage)?;
let entry = DirectoryEntry {
filename: generate_random_filename(),
content_hash: file_hash,
};
entries.push(entry);
} else if current_depth < max_depth {
// 30% chance of creating a subdirectory
let dir_hash = generate_random_directory_node(current_depth, max_depth, storage)?;
// Create a basic directory entry name
let mut filename_bytes = [0; 32];
let subdir_name = format!("dir_{}", current_depth);
filename_bytes[..subdir_name.len()].copy_from_slice(subdir_name.as_bytes());
let entry = DirectoryEntry {
filename: filename_bytes,
content_hash: dir_hash,
};
entries.push(entry);
}
}
let node = MerkleNode::Directory(DirectoryNode::new(entries)?);
let hash = hash(&node.serialize());
storage.insert(hash, node);
Ok(hash)
} else {
// Generate a BigDirectory Node (internal directory structure)
let num_children = rng.random_range(MIN_BIG_CHILDREN..=MAX_BIG_CHILDREN.min(4)); // Limit children count
let mut children = Vec::with_capacity(num_children);
for _ in 0..num_children {
// Children must be Directory or BigDirectory
let child_hash = generate_random_directory_node(current_depth, max_depth, storage)?;
children.push(child_hash);
}
let node = MerkleNode::BigDirectory(BigDirectoryNode::new(children)?);
let hash = hash(&node.serialize());
storage.insert(hash, node);
Ok(hash)
}
}
/// Builds a complete random Merkle tree at most `max_depth` levels deep.
///
/// Returns the root directory's hash together with the backing storage map
/// holding every generated node, keyed by its hash.
pub fn generate_random_tree(
    max_depth: u32,
) -> Result<(NodeHash, HashMap<NodeHash, MerkleNode>), String> {
    let mut storage = HashMap::new();
    // Generation starts from the root directory at depth 0.
    generate_random_directory_node(0, max_depth, &mut storage)
        .map(|root_hash| (root_hash, storage))
}
pub fn generate_base_tree() -> (NodeHash, HashMap<NodeHash, MerkleNode>) {
let mut res = HashMap::new();
let node1 = MerkleNode::Chunk(ChunkNode::new_random());
let hash1 = hash(&node1.serialize());
let node2 = MerkleNode::Chunk(ChunkNode::new_random());
let hash2 = hash(&node2.serialize());
res.insert(hash1, node1);
res.insert(hash2, node2);
let node3 = MerkleNode::Chunk(ChunkNode::new_random());
let hash3 = hash(&node3.serialize());
res.insert(hash3, node3);
let dir1 = MerkleNode::Directory(DirectoryNode {
entries: [DirectoryEntry {
filename: generate_random_filename(),
content_hash: hash3,
}]
.to_vec(),
});
let hash_dir1 = hash(&dir1.serialize());
res.insert(hash_dir1, dir1);
let root = MerkleNode::Directory(DirectoryNode {
entries: [
DirectoryEntry {
filename: generate_random_filename(),
content_hash: hash1,
},
DirectoryEntry {
filename: generate_random_filename(),
content_hash: hash2,
},
DirectoryEntry {
filename: generate_random_filename(),
content_hash: hash_dir1,
},
]
.to_vec(),
});
let root_hash = hash(&root.serialize());
res.insert(root_hash, root);
(root_hash, res)
}

View File

@@ -31,7 +31,7 @@ pub fn parse_received_datum(
hash.copy_from_slice(&recevied_datum[offset + 32..offset + 64]); hash.copy_from_slice(&recevied_datum[offset + 32..offset + 64]);
// envoyer un datum request // envoyer un datum request
dir_entries.push(DirectoryEntry { dir_entries.push(DirectoryEntry {
filename: name.to_vec(), filename: name.try_into().expect("incorrect size"),
content_hash: hash, content_hash: hash,
}); });
} }
@@ -71,10 +71,7 @@ pub fn parse_received_datum(
let mut hash = [0u8; 32]; let mut hash = [0u8; 32];
hash.copy_from_slice(&recevied_datum[offset + 32..offset + 64]); hash.copy_from_slice(&recevied_datum[offset + 32..offset + 64]);
// envoyer un datum request // envoyer un datum request
dir_entries.push(DirectoryEntry { dir_entries.push(hash);
filename: name.to_vec(),
content_hash: hash,
});
} }
let current = BigDirectoryNode::new(dir_entries); let current = BigDirectoryNode::new(dir_entries);

View File

@@ -1,5 +1,6 @@
mod cryptographic_signature; mod cryptographic_signature;
mod data; mod data;
mod datum_generation;
mod datum_parsing; mod datum_parsing;
mod message_handling; mod message_handling;
mod messages_channels; mod messages_channels;
@@ -9,6 +10,7 @@ mod registration;
mod server_communication; mod server_communication;
mod threads_handling; mod threads_handling;
use crate::peers_refresh::*;
use crate::{ use crate::{
cryptographic_signature::CryptographicSignature, cryptographic_signature::CryptographicSignature,
message_handling::EventType, message_handling::EventType,
@@ -22,8 +24,10 @@ use crate::{
threads_handling::Worker, threads_handling::Worker,
}; };
use std::{ use std::{
clone,
io::Error, io::Error,
net::{IpAddr, Ipv4Addr, UdpSocket}, net::{IpAddr, Ipv4Addr, UdpSocket},
time::Duration,
}; };
use std::{ use std::{
net::SocketAddr, net::SocketAddr,
@@ -42,6 +46,7 @@ pub struct P2PSharedData {
use bytes::Bytes; use bytes::Bytes;
use p256::pkcs8::der::pem::Base64Encoder; use p256::pkcs8::der::pem::Base64Encoder;
use reqwest::Client;
impl P2PSharedData { impl P2PSharedData {
pub fn new( pub fn new(
@@ -63,9 +68,6 @@ impl P2PSharedData {
let server_name = Arc::new(Mutex::new("".to_string())); let server_name = Arc::new(Mutex::new("".to_string()));
let handhsake_peers = Arc::new(HandshakeHistory::new()); let handhsake_peers = Arc::new(HandshakeHistory::new());
let worker = handhsake_peers.update_handshake();
threads.push(worker);
Ok(P2PSharedData { Ok(P2PSharedData {
shared_socket: shared_socket, shared_socket: shared_socket,
shared_cryptopair: shared_cryptopair, shared_cryptopair: shared_cryptopair,
@@ -100,13 +102,12 @@ impl P2PSharedData {
pub fn socket_ref(&self) -> &UdpSocket { pub fn socket_ref(&self) -> &UdpSocket {
&*self.shared_socket &*self.shared_socket
} }
pub fn handshakes(&self) -> Arc<HandshakeHistory> {
self.handshake_peers.clone()
}
pub fn cryptopair_ref(&self) -> &CryptographicSignature { pub fn cryptopair_ref(&self) -> &CryptographicSignature {
&*self.shared_cryptopair &*self.shared_cryptopair
} }
pub fn handshake_ref(&self) -> &HandshakeHistory {
&*self.handshake_peers
}
pub fn messages_list_ref(&self) -> &Mutex<HashMap<i32, EventType>> { pub fn messages_list_ref(&self) -> &Mutex<HashMap<i32, EventType>> {
&*self.shared_messageslist &*self.shared_messageslist
@@ -195,8 +196,6 @@ pub fn start_p2p_executor(
// Use tokio to spawn the asynchronous networking logic // Use tokio to spawn the asynchronous networking logic
tokio::task::spawn(async move { tokio::task::spawn(async move {
// P2P/Networking Setup goes here // P2P/Networking Setup goes here
let handshake_history = Arc::new(Mutex::new(HandshakeHistory::new()));
let handshake_clone = handshake_history.clone();
println!("Network executor started."); println!("Network executor started.");
@@ -208,13 +207,20 @@ pub fn start_p2p_executor(
NetworkCommand::ServerHandshake(username, ip) => { NetworkCommand::ServerHandshake(username, ip) => {
println!("server handshake called"); println!("server handshake called");
if let Some(sd) = shared_data.as_mut() { if let Some(sd) = shared_data.as_mut() {
start_receving_thread(sd, event_tx.clone(), &handshake_clone); start_receving_thread(sd, event_tx.clone(), sd.handshakes());
start_retry_thread( start_retry_thread(
sd.senders(), sd.senders(),
4, 4,
sd.messages_list(), sd.messages_list(),
sd.threads().as_mut(), sd.threads().as_mut(),
); );
update_handshake(
sd.senders(),
sd.cryptopair(),
sd.messages_list(),
sd.handshake_peers.username_k_peerinfo_v.clone(),
);
let res = let res =
perform_handshake(&sd, username, ip, event_tx.clone(), true).await; perform_handshake(&sd, username, ip, event_tx.clone(), true).await;
} else { } else {
@@ -234,10 +240,7 @@ pub fn start_p2p_executor(
NetworkCommand::Discover(username, hash, ip) => { NetworkCommand::Discover(username, hash, ip) => {
// envoie un handshake au peer, puis un root request // envoie un handshake au peer, puis un root request
if let Some(sd) = shared_data.as_ref() { if let Some(sd) = shared_data.as_ref() {
let res = { let res = sd.handshake_peers.get_peer_info_username(username.clone());
let m = handshake_clone.lock().unwrap();
m.get_peer_info_username(username.clone()).cloned()
};
match res { match res {
Some(peerinfo) => { Some(peerinfo) => {
let id = generate_id(); let id = generate_id();
@@ -392,7 +395,11 @@ pub fn start_p2p_executor(
); );
} }
} }
None => {} None => {
let err_msg =
format!("failed to retreive socket address:").to_string();
let res = event_tx.send(NetworkEvent::Error(err_msg));
}
} }
} }
println!("[Network] Ping() called"); println!("[Network] Ping() called");
@@ -428,13 +435,15 @@ pub fn start_p2p_executor(
print!("{:?}", payload.clone()); print!("{:?}", payload.clone());
let id = generate_id();
let natreq = construct_message( let natreq = construct_message(
NATTRAVERSALREQUEST, NATTRAVERSALREQUEST,
payload.clone(), payload.clone(),
generate_id(), id.clone(),
&sd.cryptopair(), &sd.cryptopair(),
); );
sd.add_message(id, EventType::NatTraversal);
sd.senders_ref().send_dispatch( sd.senders_ref().send_dispatch(
natreq.expect( natreq.expect(
"couldnt construct message nattraversalrequest2", "couldnt construct message nattraversalrequest2",
@@ -502,11 +511,14 @@ fn parse_pack(s: &str) -> Option<[u8; 6]> {
/// ///
pub async fn get_socket_address(username: String, ip: String) -> Option<SocketAddr> { pub async fn get_socket_address(username: String, ip: String) -> Option<SocketAddr> {
let client = reqwest::Client::new(); let client = Client::builder()
.timeout(Duration::from_secs(5))
.build()
.expect("cannot create client");
let uri = format!("{}/peers/{}/addresses", ip, username); let uri = format!("{}/peers/{}/addresses", ip, username);
let res = client.get(uri).send().await.expect("couldnt get response"); let res = client.get(uri).send().await.expect("couldnt get response");
if res.status().is_success() { if res.status().is_success() {
println!("Successfully retreived the addresses."); println!("Successfully retreived the addresses. {}", res.status());
} else { } else {
eprintln!( eprintln!(
"Failed to get the peers addresses from the server. Status: {}", "Failed to get the peers addresses from the server. Status: {}",

View File

@@ -36,7 +36,7 @@ const LENGTH: usize = 7;
const EXTENSIONS: usize = 4; const EXTENSIONS: usize = 4;
const SIGNATURE: usize = 64; const SIGNATURE: usize = 64;
const PING: u8 = 0; pub const PING: u8 = 0;
const OK: u8 = 128; const OK: u8 = 128;
const ERROR: u8 = 129; const ERROR: u8 = 129;
const HELLO: u8 = 1; const HELLO: u8 = 1;
@@ -58,7 +58,7 @@ pub fn handle_recevied_message(
server_name: &String, server_name: &String,
cmd_tx: crossbeam_channel::Sender<NetworkEvent>, cmd_tx: crossbeam_channel::Sender<NetworkEvent>,
ip: SocketAddr, ip: SocketAddr,
handhsake_history: &Arc<Mutex<HandshakeHistory>>, handhsake_history: Arc<HandshakeHistory>,
) { ) {
if recevied_message.len() < 4 { if recevied_message.len() < 4 {
return; return;
@@ -114,10 +114,9 @@ pub fn parse_message(
cmd_tx: crossbeam_channel::Sender<NetworkEvent>, cmd_tx: crossbeam_channel::Sender<NetworkEvent>,
ip: SocketAddr, ip: SocketAddr,
messages_list: &Arc<Mutex<HashMap<i32, EventType>>>, messages_list: &Arc<Mutex<HashMap<i32, EventType>>>,
handhsake_history_mutex: &Arc<Mutex<HandshakeHistory>>, handhsake_history: Arc<HandshakeHistory>,
senders: &MultipleSenders, senders: &MultipleSenders,
) -> Option<Vec<u8>> { ) -> Option<Vec<u8>> {
let mut handhsake_history = handhsake_history_mutex.lock().unwrap();
let cmd_tx_clone = cmd_tx.clone(); let cmd_tx_clone = cmd_tx.clone();
let id_bytes: [u8; 4] = received_message[0..ID] let id_bytes: [u8; 4] = received_message[0..ID]
@@ -148,12 +147,8 @@ pub fn parse_message(
.block_on(get_peer_key(&username)) .block_on(get_peer_key(&username))
.expect("failed to retrieve public key"), .expect("failed to retrieve public key"),
}; };
match msgtype { println!("handshake JULIUS added");
HELLOREPLY => { handhsake_history.add_new_handshake(peer_pubkey, "".to_string(), ip);
handhsake_history.add_new_handshake(peer_pubkey, "".to_string(), ip);
}
_ => {}
}
let signature: [u8; SIGNATURE] = received_message let signature: [u8; SIGNATURE] = received_message
[LENGTH + msg_length..LENGTH + msg_length + SIGNATURE] [LENGTH + msg_length..LENGTH + msg_length + SIGNATURE]
.try_into() .try_into()
@@ -194,14 +189,10 @@ pub fn parse_message(
// Message handling // Message handling
let mut constructed_message: Option<Vec<u8>> = None; let mut constructed_message: Option<Vec<u8>> = None;
match msgtype { match msgtype {
// PING
//
// envoie un OK
PING => { PING => {
constructed_message = construct_message(OK, Vec::new(), id, crypto_pair); constructed_message = construct_message(OK, Vec::new(), id, crypto_pair);
} }
//
// OK
OK => { OK => {
let mut guard = messages_list.lock().unwrap(); let mut guard = messages_list.lock().unwrap();
let res = guard.get(&id); let res = guard.get(&id);
@@ -216,9 +207,7 @@ pub fn parse_message(
} }
} }
} }
//
// rien ?
// si NATTRAVERSALREQUEST alors
NATTRAVERSALREQUEST => { NATTRAVERSALREQUEST => {
// send ok & send nattraversalrequest2 to peer // send ok & send nattraversalrequest2 to peer
constructed_message = construct_message(OK, Vec::new(), id, crypto_pair); constructed_message = construct_message(OK, Vec::new(), id, crypto_pair);
@@ -277,10 +266,7 @@ pub fn parse_message(
); );
constructed_message = None; constructed_message = None;
} }
//
// ERROR
//
// affiche un msg d'erreur
ERROR => { ERROR => {
if let Ok(err_received) = if let Ok(err_received) =
String::from_utf8(received_message[LENGTH..(msg_length + LENGTH)].to_vec()) String::from_utf8(received_message[LENGTH..(msg_length + LENGTH)].to_vec())
@@ -292,10 +278,7 @@ pub fn parse_message(
let _ = cmd_tx_clone.send(NetworkEvent::Error(err_msg)); let _ = cmd_tx_clone.send(NetworkEvent::Error(err_msg));
} }
} }
// HELLO
//
// envoie une hello reply
//
HELLO => { HELLO => {
let mut payload = Vec::new(); let mut payload = Vec::new();
@@ -318,10 +301,7 @@ pub fn parse_message(
return helloreply; return helloreply;
} }
// HELLOREPLY
//
//
// ajoute a la liste des peers handshake
HELLOREPLY => { HELLOREPLY => {
// ajoute l'username a la liste des peers handshake // ajoute l'username a la liste des peers handshake
let received_length = u16::from_be_bytes( let received_length = u16::from_be_bytes(
@@ -365,13 +345,6 @@ pub fn parse_message(
None => {} None => {}
} }
} }
//
// ROOTREQUEST
//
// envoie un root reply
//
// ROOTREPLY
//
ROOTREPLY => { ROOTREPLY => {
// recuperer le pseudo du peers ayant repondu // recuperer le pseudo du peers ayant repondu
let peers_exist = handhsake_history.get_peer_info_ip(ip.to_string()); let peers_exist = handhsake_history.get_peer_info_ip(ip.to_string());
@@ -422,16 +395,6 @@ pub fn parse_message(
} }
} }
} }
//
// DATUMREQUEST
//
// envoie le datum
//
// NODATUM
//
// affiche un msg d'erreur
//
// DATUM
DATUM => { DATUM => {
let mut guard = messages_list.lock().expect("Échec du verrouillage"); let mut guard = messages_list.lock().expect("Échec du verrouillage");
let res = guard.get(&id); let res = guard.get(&id);
@@ -461,65 +424,6 @@ pub fn parse_message(
None => {} None => {}
} }
} }
// parcourt le directory recu ou le big directory et renvoie une DATUMREQUEST pour chaque
// directory ou big directory lu
//
// NATTRAVERSALREQUEST
//
// repond OK et envoie un NATTRAVERSALREQUEST2 au pair B
//
// NATTRAVERSALREQUEST2
//
// envoie OK à S puis envoie un ping à S
// PING
//
// envoie un OK
//
// OK
//
// si NATTRAVERSALREQUEST alors
//
// ERROR
//
// affiche un msg d'erreur
//
// HELLO
//
// envoie une hello reply
//
// HELLOREPLY
//
// envoie un root request
//
// ROOTREQUEST
//
// envoie un root reply
//
// ROOTREPLY
//
// envoie un datum request
//
// DATUMREQUEST
//
// envoie le datum
//
// NODATUM
//
// affiche un msg d'erreur
//
// DATUM
//
// parcourt le directory recu ou le big directory et renvoie une DATUMREQUEST pour chaque
// directory ou big directory lu
//
// NATTRAVERSALREQUEST
//
// repond OK et envoie un NATTRAVERSALREQUEST2 au pair B
//
// NATTRAVERSALREQUEST2
//
// envoie OK à S puis envoie un ping à S
_ => return None, _ => return None,
}; };
constructed_message constructed_message

View File

@@ -7,6 +7,7 @@ use crate::message_handling::EventType;
use crate::message_handling::handle_recevied_message; use crate::message_handling::handle_recevied_message;
use crate::peers_refresh::HandshakeHistory; use crate::peers_refresh::HandshakeHistory;
use crate::threads_handling::Worker; use crate::threads_handling::Worker;
use std::clone;
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
use std::hash::Hash; use std::hash::Hash;
use std::net::SocketAddr; use std::net::SocketAddr;
@@ -261,15 +262,13 @@ pub fn start_retry_thread(
pub fn start_receving_thread( pub fn start_receving_thread(
shared_data: &mut P2PSharedData, shared_data: &mut P2PSharedData,
cmd_tx: crossbeam_channel::Sender<NetworkEvent>, cmd_tx: crossbeam_channel::Sender<NetworkEvent>,
handshake_history: &Arc<Mutex<HandshakeHistory>>, handshake_history: Arc<HandshakeHistory>,
) { ) {
let sock_clone = shared_data.socket(); let sock_clone = shared_data.socket();
let cryptopair_clone = shared_data.cryptopair(); let cryptopair_clone = shared_data.cryptopair();
let senders_clone = shared_data.senders(); let senders_clone = shared_data.senders();
let messages_clone = shared_data.messages_list(); let messages_clone = shared_data.messages_list();
let servername_clone = shared_data.servername(); let servername_clone = shared_data.servername();
let handshake_clone = handshake_history.clone();
let thread = thread::spawn(move || { let thread = thread::spawn(move || {
let mut buf = [0u8; 1024]; let mut buf = [0u8; 1024];
loop { loop {
@@ -286,7 +285,7 @@ pub fn start_receving_thread(
&servername_clone, &servername_clone,
cmd_tx.clone(), cmd_tx.clone(),
src, src,
&handshake_clone, handshake_history.clone(),
); );
} }
Err(e) => eprintln!("Erreur de réception: {}", e), Err(e) => eprintln!("Erreur de réception: {}", e),

View File

@@ -1,6 +1,8 @@
// this class consists of a thread that will re send pings every time the first element // this class consists of a thread that will re send pings every time the first element
// of the stack is at the correct unix time // of the stack is at the correct unix time
pub use crate::message_handling::*;
use std::{ use std::{
collections::{HashMap, VecDeque}, collections::{HashMap, VecDeque},
net::{AddrParseError, Ipv4Addr, SocketAddr}, net::{AddrParseError, Ipv4Addr, SocketAddr},
@@ -11,7 +13,10 @@ use std::{
time::{self, Duration, SystemTime}, time::{self, Duration, SystemTime},
}; };
use crate::{NetworkEvent, threads_handling::Worker}; use crate::{
NetworkEvent, cryptographic_signature::CryptographicSignature,
messages_channels::MultipleSenders, threads_handling::Worker,
};
use crate::{ use crate::{
P2PSharedData, construct_message, generate_id, messages_structure, P2PSharedData, construct_message, generate_id, messages_structure,
registration::perform_handshake, registration::perform_handshake,
@@ -26,59 +31,35 @@ pub struct PeerInfo {
pub ip: SocketAddr, pub ip: SocketAddr,
} }
#[derive(Debug, Clone)]
pub struct HandshakeHistory { pub struct HandshakeHistory {
//time_k_ip_v: HashMap<u64, u64>, pub username_k_peerinfo_v: Arc<Mutex<HashMap<String, PeerInfo>>>,
username_k_peerinfo_v: HashMap<String, PeerInfo>, ip_k_peerinfo_v: Arc<Mutex<HashMap<String, PeerInfo>>>,
ip_k_peerinfo_v: HashMap<String, PeerInfo>,
} }
impl HandshakeHistory { impl HandshakeHistory {
pub fn new() -> HandshakeHistory { pub fn new() -> HandshakeHistory {
HandshakeHistory { HandshakeHistory {
//time_k_ip_v: HashMap::new(), username_k_peerinfo_v: Arc::new(Mutex::new(HashMap::new())),
//ip_k_peerinfo_v: HashMap::new(), ip_k_peerinfo_v: Arc::new(Mutex::new(HashMap::new())),
username_k_peerinfo_v: HashMap::new(),
ip_k_peerinfo_v: HashMap::new(),
} }
} }
/*pub fn update_handshake(&self) { pub fn get_peer_info_username(&self, username: String) -> Option<PeerInfo> {
let hashmap_shared = Arc::new(self.username_k_peerinfo_v); //self.username_k_peerinfo_v.get(&username).clone()
thread::spawn(move || {
let selfhashmap = hashmap_shared.clone();
loop {
for peer in selfhashmap.keys() {
let peer_ip = selfhashmap.get(peer);
// send ping
}
let mut child = Command::new("sleep").arg("10").spawn().unwrap();
let _result = child.wait().unwrap();
}
});
}*/
pub fn get_peer_info_username(&self, username: String) -> Option<&PeerInfo> { let guard = self.username_k_peerinfo_v.lock().unwrap();
self.username_k_peerinfo_v.get(&username).clone()
guard.get(&username).cloned()
} }
pub fn get_peer_info_ip(&self, ip: String) -> Option<&PeerInfo> { pub fn get_peer_info_ip(&self, ip: String) -> Option<PeerInfo> {
self.ip_k_peerinfo_v.get(&ip).clone() let guard = self.ip_k_peerinfo_v.lock().unwrap();
guard.get(&ip).cloned()
} }
pub fn update_handshake(&self) -> Worker { pub fn update_peer_info(&self, ip: String, username: String) {
let map_clone: Arc<HashMap<String, PeerInfo>> =
Arc::new(self.username_k_peerinfo_v.clone());
let map_for_thread = Arc::clone(&map_clone);
let handle = thread::spawn(move || {
loop {
for (peer, peerinfo) in map_for_thread.iter() {}
thread::sleep(Duration::from_secs(10));
}
});
Worker::spawn(handle, crate::threads_handling::WorkerType::PING)
}
pub fn update_peer_info(&mut self, ip: String, username: String) {
let peerinfo = self.get_peer_info_ip(ip.clone()); let peerinfo = self.get_peer_info_ip(ip.clone());
match peerinfo { match peerinfo {
Some(peer_info) => match ip.parse::<SocketAddr>() { Some(peer_info) => match ip.parse::<SocketAddr>() {
@@ -88,8 +69,18 @@ impl HandshakeHistory {
pubkey: peer_info.pubkey, pubkey: peer_info.pubkey,
ip: addr, ip: addr,
}; };
self.ip_k_peerinfo_v.insert(ip, new_peer_info.clone()); let mut guardb = self.ip_k_peerinfo_v.lock().unwrap();
self.username_k_peerinfo_v.insert(username, new_peer_info); guardb.insert(ip.to_string(), new_peer_info.clone());
let mut guard = self.username_k_peerinfo_v.lock().unwrap();
guard.insert(username.to_string(), new_peer_info);
println!(
"handshake added: {}, {}, {}",
username.to_string(),
ip.to_string(),
guard.len(),
);
} }
Err(e) => eprintln!("parse error: {}", e), Err(e) => eprintln!("parse error: {}", e),
}, },
@@ -99,43 +90,56 @@ impl HandshakeHistory {
} }
} }
pub fn add_new_handshake(&mut self, hash: VerifyingKey, username: String, ip: SocketAddr) { pub fn add_new_handshake(&self, hash: VerifyingKey, username: String, ip: SocketAddr) {
let peerinfo = PeerInfo { let peerinfo = PeerInfo {
username: username.clone(), username: username.clone(),
pubkey: hash, pubkey: hash,
ip, ip,
}; };
self.username_k_peerinfo_v let mut guard = self.username_k_peerinfo_v.lock().unwrap();
.insert(username, peerinfo.clone()); guard.insert(username, peerinfo.clone());
self.ip_k_peerinfo_v let mut guardb = self.ip_k_peerinfo_v.lock().unwrap();
.insert(ip.to_string(), peerinfo.clone()); guardb.insert(ip.to_string(), peerinfo.clone());
} }
} }
pub fn perform_discover( pub fn update_handshake(
username: String, senders: Arc<MultipleSenders>,
hash: String, crypto_pair: Arc<CryptographicSignature>,
sd: &P2PSharedData, messages_list: Arc<Mutex<HashMap<i32, EventType>>>,
server_ip: String, username_k_peerinfo_v: Arc<Mutex<HashMap<String, PeerInfo>>>,
event_tx: Sender<NetworkEvent>, ) -> Worker {
) { let map_for_thread = username_k_peerinfo_v.clone();
// first, sends handshake let handle = thread::spawn(move || {
if hash == "root" { loop {
perform_handshake(sd, username, server_ip, event_tx, false); println!("loop boucle");
/*if let Some(data) = construct_message( let guard = map_for_thread.lock().unwrap();
messages_structure::ROOTREQUEST, println!("len:{}", guard.len());
Vec::new(), for (peer, peerinfo) in guard.iter() {
generate_id(), let id = generate_id();
sd.cryptopair_ref(), let mut map = messages_list.lock().unwrap();
) { map.insert(id, EventType::Ping);
if let Some(peerinfo) = sd.handshake_ref() { drop(map);
sd.senders_ref() let pingrequest = construct_message(PING, Vec::new(), id, &crypto_pair);
.send_via(0, data, peerinfo.ip.to_string(), false); if let Some(ping) = pingrequest {
senders.add_message_to_retry_queue(
ping.clone(),
peerinfo.ip.to_string(),
false,
);
senders.send_dispatch(
ping,
peerinfo.ip.to_string(),
false,
messages_list.clone(),
);
println!("ping envoye a {}", peer);
}
} }
}*/ thread::sleep(Duration::from_secs(2));
} else { }
// envoyer un datum request });
} Worker::spawn(handle, crate::threads_handling::WorkerType::PING)
} }
#[cfg(test)] #[cfg(test)]

View File

@@ -71,6 +71,12 @@ pub async fn perform_handshake(
payload.extend_from_slice(&0u32.to_be_bytes()); payload.extend_from_slice(&0u32.to_be_bytes());
payload.extend_from_slice(&crypto_pair.username.clone().as_bytes()); payload.extend_from_slice(&crypto_pair.username.clone().as_bytes());
let hello_handshake = construct_message(1, payload, id, crypto_pair); let hello_handshake = construct_message(1, payload, id, crypto_pair);
if is_server_handshake {
sd.add_message(id, EventType::Hello);
} else {
sd.add_message(id, EventType::HelloThenRootRequest);
}
match hello_handshake { match hello_handshake {
Some(handshake_message) => { Some(handshake_message) => {
senders.send_dispatch( senders.send_dispatch(

51
todo.md
View File

@@ -1,58 +1,25 @@
# Todo # Todo
## peer discovery ## bugfix
## handshake - ajouter hello et nat a l'exp backoff OK
- peers n'ayant pas d'adresse OK
- verifier le refresh des peers
# Todo
## peer discovery
- get rsquest to the uri /peers/
## registration with the server
- generation of the cryptographic key OK
- put request to the uri (check if the peer is already connected) OK
- udp handshakes OK
- get request to the uri /peers/key to get the public key of a peer OK
- get request to the uri /peers/key/addresses OK
## handshake
- handshake structure OK
- 5min timeout after handshake
- matain connection every 4 min
## data transfer
- request structure
- root/root reply structure
- datum/nodatum and datum structures
- nattraversal 1 and 2 structures
- setting in gui to act as a relay - setting in gui to act as a relay
- chunk, directory, big, bigdirectory structures
## fonctionnalités application
## nat traversal
- make hello and helloreply messages set the first extension bit to announce that peer is available for nat traversal - make hello and helloreply messages set the first extension bit to announce that peer is available for nat traversal
- implement actual nat traversal requests - implement actual nat traversal requests
- implement nat traversal : - implement nat traversal :
- if hello/helloreply doesnt work with a peer, find a peer that supports nat traversal (server in priority) then begin protocol - if hello/helloreply doesnt work with a peer, find a peer that supports nat traversal (server in priority) then begin protocol
fonctionnalités : ## fonctionnalités :
rechercher les fichiers d'un pair rechercher les fichiers d'un pair
telechargement des fichiers telechargement des fichiers
choisir un dossier à partager choisir un dossier à partager
choisir le nombre de canaux choisir le nombre de canaux
handshake server DOING
se deconnecter du réseau DOING
## autre ## autre
socket ipv6 socket ipv6
@@ -71,3 +38,9 @@ socket ipv6
- generer une clé publique OK - generer une clé publique OK
- verifier signature OK - verifier signature OK
- 2 channels -> un pour envoyer et un pour recevoir OK - 2 channels -> un pour envoyer et un pour recevoir OK
- get rsquest to the uri /peers/ OK
- request structure
- root/root reply structure
- datum/nodatum and datum structures
- nattraversal 1 and 2 structures
- chunk, directory, big, bigdirectory structures