This commit is contained in:
TIBERGHIEN corentin
2026-01-21 02:45:48 +01:00
parent dacedd1ceb
commit 8b2ab4861b
10 changed files with 351 additions and 460 deletions

View File

@@ -3,51 +3,12 @@ use std::collections::HashMap;
use std::hash::{DefaultHasher, Hash, Hasher};
// --- Constants ---
const MAX_CHUNK_DATA_SIZE: usize = 1024;
const MAX_DIRECTORY_ENTRIES: usize = 16;
const MAX_BIG_CHILDREN: usize = 32;
const MIN_BIG_CHILDREN: usize = 2;
const FILENAME_HASH_SIZE: usize = 32;
const DIRECTORY_ENTRY_SIZE: usize = FILENAME_HASH_SIZE * 2; // 64 bytes
/// Hashes `data` into a 32-byte `NodeHash`.
///
/// Uses std's `DefaultHasher`, so only the first 8 bytes of the returned
/// array carry entropy; the remaining 24 bytes stay zero.
/// NOTE(review): this is NOT a cryptographic hash — collisions are easy to
/// produce, which weakens any Merkle-tree integrity guarantee. Confirm
/// whether a real 256-bit digest (e.g. SHA-256) is required here.
fn hash(data: &[u8]) -> NodeHash {
let mut hasher = DefaultHasher::new();
data.hash(&mut hasher);
let hash_u64 = hasher.finish();
let mut hash_array = [0u8; FILENAME_HASH_SIZE];
// Simple way to spread a 64-bit hash across 32 bytes for a unique-ish ID
// (bytes are written in little-endian order of the u64).
for i in 0..8 {
hash_array[i] = (hash_u64 >> (i * 8)) as u8;
}
hash_array // The rest remains 0, satisfying the 32-byte requirement
}
/// Generates a pseudo-random filename packed into a fixed 32-byte buffer.
///
/// Layout: 5–20 random lowercase ASCII letters followed by ".txt" or ".dat";
/// trailing unused bytes remain zero (NUL padding).
fn generate_random_filename() -> [u8; FILENAME_HASH_SIZE] {
let mut rng = rand::rng();
let mut filename_bytes = [0; FILENAME_HASH_SIZE];
// Generate a random length for the base name
let name_len = rng.random_range(5..21);
// Generate random alphanumeric characters
for i in 0..name_len {
let char_code = rng.random_range(97..123); // 'a' through 'z'
// Guard is always true here (name_len <= 20 < FILENAME_HASH_SIZE).
if i < FILENAME_HASH_SIZE {
filename_bytes[i] = char_code as u8;
}
}
// Append a common extension
let ext = if rng.random_bool(0.5) { ".txt" } else { ".dat" };
let ext_bytes = ext.as_bytes();
// Clamp so the extension always fits inside the 32-byte buffer.
let start_index = name_len.min(FILENAME_HASH_SIZE - ext_bytes.len());
if start_index < FILENAME_HASH_SIZE {
filename_bytes[start_index..(start_index + ext_bytes.len())].copy_from_slice(ext_bytes);
}
filename_bytes
}
pub const MAX_CHUNK_DATA_SIZE: usize = 1024;
pub const MAX_DIRECTORY_ENTRIES: usize = 16;
pub const MAX_BIG_CHILDREN: usize = 32;
pub const MIN_BIG_CHILDREN: usize = 2;
pub const FILENAME_HASH_SIZE: usize = 32;
pub const DIRECTORY_ENTRY_SIZE: usize = FILENAME_HASH_SIZE * 2; // 64 bytes
pub type NodeHash = [u8; FILENAME_HASH_SIZE];
@@ -80,100 +41,6 @@ impl MerkleTree {
}
}
/*fn generate_random_file_node(
storage: &mut HashMap<NodeHash, MerkleNode>,
) -> Result<NodeHash, String> {
let mut rng = rng();
let is_big = rng.random_bool(0.2); // 20% chance of being a big file
if !is_big {
// Generate a simple Chunk Node
let node = MerkleNode::Chunk(ChunkNode::new_random());
let hash = hash(&node.serialize());
storage.insert(hash, node);
Ok(hash)
} else {
// Generate a Big Node (a file composed of chunks)
let num_children = rng.random_range(MIN_BIG_CHILDREN..=MAX_BIG_CHILDREN.min(8)); // Limit complexity
let mut children_hashes = Vec::with_capacity(num_children);
for _ in 0..num_children {
// Children must be Chunk or Big; for simplicity, we only generate Chunk children here.
let chunk_node = MerkleNode::Chunk(ChunkNode::new_random());
let chunk_hash = hash(&chunk_node.serialize());
storage.insert(chunk_hash, chunk_node);
children_hashes.push(chunk_hash);
}
let node = MerkleNode::Big(BigNode::new(children_hashes)?);
let hash = hash(&node.serialize());
storage.insert(hash, node);
Ok(hash)
}
}*/
/*fn generate_random_directory_node(
depth: u32,
max_depth: u32,
storage: &mut HashMap<NodeHash, MerkleNode>,
) -> Result<NodeHash, String> {
let mut rng = rng();
let current_depth = depth + 1;
let is_big_dir = rng.random_bool(0.3) && current_depth < max_depth;
if !is_big_dir || current_depth >= max_depth {
// Generate a simple Directory Node (leaf level directory)
let num_entries = rng.random_range(1..=MAX_DIRECTORY_ENTRIES.min(5)); // Limit directory size for testing
let mut entries = Vec::with_capacity(num_entries);
for _ in 0..num_entries {
if rng.random_bool(0.7) {
// 70% chance of creating a file (Chunk/Big)
let file_hash = generate_random_file_node(storage)?;
let entry = DirectoryEntry {
filename: generate_random_filename(),
content_hash: file_hash,
};
entries.push(entry);
} else if current_depth < max_depth {
// 30% chance of creating a subdirectory
let dir_hash = generate_random_directory_node(current_depth, max_depth, storage)?;
// Create a basic directory entry name
let mut filename_bytes = [0; 32];
let subdir_name = format!("dir_{}", current_depth);
filename_bytes[..subdir_name.len()].copy_from_slice(subdir_name.as_bytes());
let entry = DirectoryEntry {
filename: filename_bytes,
content_hash: dir_hash,
};
entries.push(entry);
}
}
let node = MerkleNode::Directory(DirectoryNode::new(entries)?);
let hash = hash(&node.serialize());
storage.insert(hash, node);
Ok(hash)
} else {
// Generate a BigDirectory Node (internal directory structure)
let num_children = rng.random_range(MIN_BIG_CHILDREN..=MAX_BIG_CHILDREN.min(4)); // Limit children count
let mut children = Vec::with_capacity(num_children);
for _ in 0..num_children {
// Children must be Directory or BigDirectory
let child_hash = generate_random_directory_node(current_depth, max_depth, storage)?;
children.push(child_hash);
}
let node = MerkleNode::BigDirectory(BigDirectoryNode::new(children)?);
let hash = hash(&node.serialize());
storage.insert(hash, node);
Ok(hash)
}
}*/
#[derive(Debug, Clone)]
pub struct ChunkNode {
pub data: Vec<u8>,
@@ -208,7 +75,7 @@ impl ChunkNode {
// Helper struct
#[derive(Debug, Clone)]
pub struct DirectoryEntry {
pub filename: Vec<u8>,
pub filename: [u8; FILENAME_HASH_SIZE],
pub content_hash: NodeHash,
}
@@ -240,7 +107,7 @@ pub struct BigNode {
}
impl BigNode {
/*pub fn new(children_hashes: Vec<NodeHash>) -> Result<Self, String> {
pub fn new(children_hashes: Vec<NodeHash>) -> Result<Self, String> {
let n = children_hashes.len();
if n < MIN_BIG_CHILDREN || n > MAX_BIG_CHILDREN {
return Err(format!(
@@ -249,17 +116,17 @@ impl BigNode {
));
}
Ok(BigNode { children_hashes })
}*/
}
}
#[derive(Debug, Clone)]
pub struct BigDirectoryNode {
//pub children_hashes: Vec<NodeHash>,
pub children_hashes: Vec<DirectoryEntry>,
pub children_hashes: Vec<NodeHash>,
// pub children_hashes: Vec<DirectoryEntry>,
}
impl BigDirectoryNode {
/*pub fn new(children_hashes: Vec<NodeHash>) -> Result<Self, String> {
pub fn new(children_hashes: Vec<NodeHash>) -> Result<Self, String> {
let n = children_hashes.len();
if n < MIN_BIG_CHILDREN || n > MAX_BIG_CHILDREN {
return Err(format!(
@@ -268,14 +135,6 @@ impl BigDirectoryNode {
));
}
Ok(BigDirectoryNode { children_hashes })
}*/
/// Builds a `BigDirectoryNode` from a list of directory entries.
///
/// # Errors
/// Returns `Err` when more than `MAX_DIRECTORY_ENTRIES` entries are supplied.
pub fn new(entries: Vec<DirectoryEntry>) -> Result<Self, String> {
    if entries.len() > MAX_DIRECTORY_ENTRIES {
        // Bug fix: the old message claimed a byte overflow ("exceeds {} bytes")
        // and printed the offending count where the limit belongs; the check
        // is actually on the ENTRY COUNT against MAX_DIRECTORY_ENTRIES.
        return Err(format!(
            "Directory exceeds {} entries (got {})",
            MAX_DIRECTORY_ENTRIES,
            entries.len()
        ));
    }
    Ok(BigDirectoryNode {
        children_hashes: entries,
    })
}
}
@@ -310,73 +169,10 @@ impl MerkleNode {
}
MerkleNode::BigDirectory(node) => {
for hash in &node.children_hashes {
bytes.extend_from_slice(&hash.content_hash);
bytes.extend_from_slice(hash);
}
}
}
bytes
}
/*pub fn generate_random_tree(
max_depth: u32,
) -> Result<(NodeHash, HashMap<NodeHash, MerkleNode>), String> {
let mut storage = HashMap::new();
// Start tree generation from the root directory at depth 0
let root_hash = generate_random_directory_node(0, max_depth, &mut storage)?;
Ok((root_hash, storage))
}*/
/*pub fn generate_base_tree() -> (NodeHash, HashMap<NodeHash, MerkleNode>) {
let mut res = HashMap::new();
let node1 = MerkleNode::Chunk(ChunkNode::new_random());
let hash1 = hash(&node1.serialize());
let node2 = MerkleNode::Chunk(ChunkNode::new_random());
let hash2 = hash(&node2.serialize());
res.insert(hash1, node1);
res.insert(hash2, node2);
let node3 = MerkleNode::Chunk(ChunkNode::new_random());
let hash3 = hash(&node3.serialize());
res.insert(hash3, node3);
let dir1 = MerkleNode::Directory(DirectoryNode {
entries: [DirectoryEntry {
filename: generate_random_filename(),
content_hash: hash3,
}]
.to_vec(),
});
let hash_dir1 = hash(&dir1.serialize());
res.insert(hash_dir1, dir1);
let root = MerkleNode::Directory(DirectoryNode {
entries: [
DirectoryEntry {
filename: generate_random_filename(),
content_hash: hash1,
},
DirectoryEntry {
filename: generate_random_filename(),
content_hash: hash2,
},
DirectoryEntry {
filename: generate_random_filename(),
content_hash: hash_dir1,
},
]
.to_vec(),
});
let root_hash = hash(&root.serialize());
res.insert(root_hash, root);
(root_hash, res)
}*/
}

View File

@@ -0,0 +1,200 @@
use crate::data::*;
use rand::{Rng, rng};
use std::collections::HashMap;
use std::hash::{DefaultHasher, Hash, Hasher};
/// Derives a 32-byte `NodeHash` from `data`.
///
/// Only the first 8 bytes carry entropy (the `DefaultHasher` output in
/// little-endian byte order); the remaining 24 bytes are left at zero so the
/// fixed 32-byte layout is always satisfied.
fn hash(data: &[u8]) -> NodeHash {
    let mut state = DefaultHasher::new();
    data.hash(&mut state);
    let digest = state.finish();
    let mut out = [0u8; FILENAME_HASH_SIZE];
    // Copy the 8 little-endian bytes of the u64 digest; the tail stays zeroed.
    out[..8].copy_from_slice(&digest.to_le_bytes());
    out
}
/// Generates a pseudo-random filename packed into a fixed 32-byte buffer.
///
/// Layout: 5–20 random lowercase ASCII letters followed by ".txt" or ".dat";
/// trailing unused bytes remain zero (NUL padding).
fn generate_random_filename() -> [u8; FILENAME_HASH_SIZE] {
    let mut rng = rand::rng();
    let mut name = [0u8; FILENAME_HASH_SIZE];
    // Random base-name length, then one random letter per slot.
    let base_len = rng.random_range(5..21);
    for slot in name.iter_mut().take(base_len) {
        *slot = rng.random_range(97..123) as u8; // 'a' through 'z'
    }
    // Pick an extension and place it right after the base name. `base_len`
    // is at most 20, so the 4-byte extension always fits inside the buffer.
    let ext: &[u8] = if rng.random_bool(0.5) { b".txt" } else { b".dat" };
    let at = base_len.min(FILENAME_HASH_SIZE - ext.len());
    name[at..at + ext.len()].copy_from_slice(ext);
    name
}
/// Inserts a randomly generated file node into `storage` and returns its hash.
///
/// 80% of the time a single `Chunk` node is produced; otherwise a `Big` node
/// backed by `MIN_BIG_CHILDREN..=min(MAX_BIG_CHILDREN, 8)` freshly generated
/// chunk children (capped to limit test complexity).
///
/// # Errors
/// Propagates any error from `BigNode::new`.
fn generate_random_file_node(
    storage: &mut HashMap<NodeHash, MerkleNode>,
) -> Result<NodeHash, String> {
    let mut rng = rng();
    // 20% chance of being a big (multi-chunk) file.
    if rng.random_bool(0.2) {
        // Children must be Chunk or Big; for simplicity only Chunks are made here.
        let child_count = rng.random_range(MIN_BIG_CHILDREN..=MAX_BIG_CHILDREN.min(8));
        let mut child_hashes = Vec::with_capacity(child_count);
        for _ in 0..child_count {
            let chunk = MerkleNode::Chunk(ChunkNode::new_random());
            let chunk_hash = hash(&chunk.serialize());
            storage.insert(chunk_hash, chunk);
            child_hashes.push(chunk_hash);
        }
        let big = MerkleNode::Big(BigNode::new(child_hashes)?);
        let big_hash = hash(&big.serialize());
        storage.insert(big_hash, big);
        Ok(big_hash)
    } else {
        // Plain single-chunk file.
        let chunk = MerkleNode::Chunk(ChunkNode::new_random());
        let chunk_hash = hash(&chunk.serialize());
        storage.insert(chunk_hash, chunk);
        Ok(chunk_hash)
    }
}
/// Recursively inserts a random directory subtree into `storage` and returns
/// the hash of its root node.
///
/// At each level there is a 30% chance of producing a `BigDirectory` internal
/// node (while below `max_depth`); otherwise a plain `Directory` with 1..=5
/// entries, each entry being a file (70%) or a subdirectory (30%, only while
/// below `max_depth`).
///
/// NOTE(review): at max depth the 30% subdirectory rolls are skipped entirely,
/// so `entries` can end up EMPTY — whether `DirectoryNode::new` accepts zero
/// entries is not visible here; confirm.
///
/// # Errors
/// Propagates errors from the node constructors and recursive calls.
fn generate_random_directory_node(
depth: u32,
max_depth: u32,
storage: &mut HashMap<NodeHash, MerkleNode>,
) -> Result<NodeHash, String> {
let mut rng = rng();
let current_depth = depth + 1;
let is_big_dir = rng.random_bool(0.3) && current_depth < max_depth;
if !is_big_dir || current_depth >= max_depth {
// Generate a simple Directory Node (leaf level directory)
let num_entries = rng.random_range(1..=MAX_DIRECTORY_ENTRIES.min(5)); // Limit directory size for testing
let mut entries = Vec::with_capacity(num_entries);
for _ in 0..num_entries {
if rng.random_bool(0.7) {
// 70% chance of creating a file (Chunk/Big)
let file_hash = generate_random_file_node(storage)?;
let entry = DirectoryEntry {
filename: generate_random_filename(),
content_hash: file_hash,
};
entries.push(entry);
} else if current_depth < max_depth {
// 30% chance of creating a subdirectory
let dir_hash = generate_random_directory_node(current_depth, max_depth, storage)?;
// Create a basic directory entry name
// ("dir_{depth}", NUL-padded to the 32-byte filename buffer)
let mut filename_bytes = [0; 32];
let subdir_name = format!("dir_{}", current_depth);
filename_bytes[..subdir_name.len()].copy_from_slice(subdir_name.as_bytes());
let entry = DirectoryEntry {
filename: filename_bytes,
content_hash: dir_hash,
};
entries.push(entry);
}
}
let node = MerkleNode::Directory(DirectoryNode::new(entries)?);
let hash = hash(&node.serialize());
storage.insert(hash, node);
Ok(hash)
} else {
// Generate a BigDirectory Node (internal directory structure)
let num_children = rng.random_range(MIN_BIG_CHILDREN..=MAX_BIG_CHILDREN.min(4)); // Limit children count
let mut children = Vec::with_capacity(num_children);
for _ in 0..num_children {
// Children must be Directory or BigDirectory
let child_hash = generate_random_directory_node(current_depth, max_depth, storage)?;
children.push(child_hash);
}
let node = MerkleNode::BigDirectory(BigDirectoryNode::new(children)?);
let hash = hash(&node.serialize());
storage.insert(hash, node);
Ok(hash)
}
}
/// Builds a random Merkle tree up to `max_depth` levels deep.
///
/// Returns the root hash together with the storage map holding every node
/// keyed by its hash.
///
/// # Errors
/// Propagates any error raised while generating the directory structure.
pub fn generate_random_tree(
    max_depth: u32,
) -> Result<(NodeHash, HashMap<NodeHash, MerkleNode>), String> {
    let mut nodes = HashMap::new();
    // Depth 0 is the root directory; generation recurses from there.
    let root = generate_random_directory_node(0, max_depth, &mut nodes)?;
    Ok((root, nodes))
}
/// Builds a small fixed-shape demo tree:
/// root directory -> two chunk files + one subdirectory holding a third chunk.
///
/// Returns the root hash and the map of every node keyed by its hash.
pub fn generate_base_tree() -> (NodeHash, HashMap<NodeHash, MerkleNode>) {
    let mut store = HashMap::new();
    // Three random leaf chunks.
    let chunk_a = MerkleNode::Chunk(ChunkNode::new_random());
    let hash_a = hash(&chunk_a.serialize());
    let chunk_b = MerkleNode::Chunk(ChunkNode::new_random());
    let hash_b = hash(&chunk_b.serialize());
    store.insert(hash_a, chunk_a);
    store.insert(hash_b, chunk_b);
    let chunk_c = MerkleNode::Chunk(ChunkNode::new_random());
    let hash_c = hash(&chunk_c.serialize());
    store.insert(hash_c, chunk_c);
    // Subdirectory containing only the third chunk.
    let subdir = MerkleNode::Directory(DirectoryNode {
        entries: vec![DirectoryEntry {
            filename: generate_random_filename(),
            content_hash: hash_c,
        }],
    });
    let subdir_hash = hash(&subdir.serialize());
    store.insert(subdir_hash, subdir);
    // Root directory: the two remaining chunks plus the subdirectory.
    let root = MerkleNode::Directory(DirectoryNode {
        entries: vec![
            DirectoryEntry {
                filename: generate_random_filename(),
                content_hash: hash_a,
            },
            DirectoryEntry {
                filename: generate_random_filename(),
                content_hash: hash_b,
            },
            DirectoryEntry {
                filename: generate_random_filename(),
                content_hash: subdir_hash,
            },
        ],
    });
    let root_hash = hash(&root.serialize());
    store.insert(root_hash, root);
    (root_hash, store)
}

View File

@@ -31,7 +31,7 @@ pub fn parse_received_datum(
hash.copy_from_slice(&recevied_datum[offset + 32..offset + 64]);
// envoyer un datum request
dir_entries.push(DirectoryEntry {
filename: name.to_vec(),
filename: name.try_into().expect("incorrect size"),
content_hash: hash,
});
}
@@ -71,10 +71,7 @@ pub fn parse_received_datum(
let mut hash = [0u8; 32];
hash.copy_from_slice(&recevied_datum[offset + 32..offset + 64]);
// envoyer un datum request
dir_entries.push(DirectoryEntry {
filename: name.to_vec(),
content_hash: hash,
});
dir_entries.push(hash);
}
let current = BigDirectoryNode::new(dir_entries);

View File

@@ -1,5 +1,6 @@
mod cryptographic_signature;
mod data;
mod datum_generation;
mod datum_parsing;
mod message_handling;
mod messages_channels;
@@ -9,6 +10,7 @@ mod registration;
mod server_communication;
mod threads_handling;
use crate::peers_refresh::*;
use crate::{
cryptographic_signature::CryptographicSignature,
message_handling::EventType,
@@ -22,8 +24,10 @@ use crate::{
threads_handling::Worker,
};
use std::{
clone,
io::Error,
net::{IpAddr, Ipv4Addr, UdpSocket},
time::Duration,
};
use std::{
net::SocketAddr,
@@ -42,6 +46,7 @@ pub struct P2PSharedData {
use bytes::Bytes;
use p256::pkcs8::der::pem::Base64Encoder;
use reqwest::Client;
impl P2PSharedData {
pub fn new(
@@ -63,9 +68,6 @@ impl P2PSharedData {
let server_name = Arc::new(Mutex::new("".to_string()));
let handhsake_peers = Arc::new(HandshakeHistory::new());
let worker = handhsake_peers.update_handshake();
threads.push(worker);
Ok(P2PSharedData {
shared_socket: shared_socket,
shared_cryptopair: shared_cryptopair,
@@ -100,13 +102,12 @@ impl P2PSharedData {
pub fn socket_ref(&self) -> &UdpSocket {
&*self.shared_socket
}
pub fn handshakes(&self) -> Arc<HandshakeHistory> {
self.handshake_peers.clone()
}
pub fn cryptopair_ref(&self) -> &CryptographicSignature {
&*self.shared_cryptopair
}
pub fn handshake_ref(&self) -> &HandshakeHistory {
&*self.handshake_peers
}
pub fn messages_list_ref(&self) -> &Mutex<HashMap<i32, EventType>> {
&*self.shared_messageslist
@@ -195,8 +196,6 @@ pub fn start_p2p_executor(
// Use tokio to spawn the asynchronous networking logic
tokio::task::spawn(async move {
// P2P/Networking Setup goes here
let handshake_history = Arc::new(Mutex::new(HandshakeHistory::new()));
let handshake_clone = handshake_history.clone();
println!("Network executor started.");
@@ -208,13 +207,20 @@ pub fn start_p2p_executor(
NetworkCommand::ServerHandshake(username, ip) => {
println!("server handshake called");
if let Some(sd) = shared_data.as_mut() {
start_receving_thread(sd, event_tx.clone(), &handshake_clone);
start_receving_thread(sd, event_tx.clone(), sd.handshakes());
start_retry_thread(
sd.senders(),
4,
sd.messages_list(),
sd.threads().as_mut(),
);
update_handshake(
sd.senders(),
sd.cryptopair(),
sd.messages_list(),
sd.handshake_peers.username_k_peerinfo_v.clone(),
);
let res =
perform_handshake(&sd, username, ip, event_tx.clone(), true).await;
} else {
@@ -234,10 +240,7 @@ pub fn start_p2p_executor(
NetworkCommand::Discover(username, hash, ip) => {
// envoie un handshake au peer, puis un root request
if let Some(sd) = shared_data.as_ref() {
let res = {
let m = handshake_clone.lock().unwrap();
m.get_peer_info_username(username.clone()).cloned()
};
let res = sd.handshake_peers.get_peer_info_username(username.clone());
match res {
Some(peerinfo) => {
let id = generate_id();
@@ -392,7 +395,11 @@ pub fn start_p2p_executor(
);
}
}
None => {}
None => {
let err_msg =
format!("failed to retreive socket address:").to_string();
let res = event_tx.send(NetworkEvent::Error(err_msg));
}
}
}
println!("[Network] Ping() called");
@@ -428,13 +435,15 @@ pub fn start_p2p_executor(
print!("{:?}", payload.clone());
let id = generate_id();
let natreq = construct_message(
NATTRAVERSALREQUEST,
payload.clone(),
generate_id(),
id.clone(),
&sd.cryptopair(),
);
sd.add_message(id, EventType::NatTraversal);
sd.senders_ref().send_dispatch(
natreq.expect(
"couldnt construct message nattraversalrequest2",
@@ -502,11 +511,14 @@ fn parse_pack(s: &str) -> Option<[u8; 6]> {
///
pub async fn get_socket_address(username: String, ip: String) -> Option<SocketAddr> {
let client = reqwest::Client::new();
let client = Client::builder()
.timeout(Duration::from_secs(5))
.build()
.expect("cannot create client");
let uri = format!("{}/peers/{}/addresses", ip, username);
let res = client.get(uri).send().await.expect("couldnt get response");
if res.status().is_success() {
println!("Successfully retreived the addresses.");
println!("Successfully retreived the addresses. {}", res.status());
} else {
eprintln!(
"Failed to get the peers addresses from the server. Status: {}",

View File

@@ -36,7 +36,7 @@ const LENGTH: usize = 7;
const EXTENSIONS: usize = 4;
const SIGNATURE: usize = 64;
const PING: u8 = 0;
pub const PING: u8 = 0;
const OK: u8 = 128;
const ERROR: u8 = 129;
const HELLO: u8 = 1;
@@ -58,7 +58,7 @@ pub fn handle_recevied_message(
server_name: &String,
cmd_tx: crossbeam_channel::Sender<NetworkEvent>,
ip: SocketAddr,
handhsake_history: &Arc<Mutex<HandshakeHistory>>,
handhsake_history: Arc<HandshakeHistory>,
) {
if recevied_message.len() < 4 {
return;
@@ -114,10 +114,9 @@ pub fn parse_message(
cmd_tx: crossbeam_channel::Sender<NetworkEvent>,
ip: SocketAddr,
messages_list: &Arc<Mutex<HashMap<i32, EventType>>>,
handhsake_history_mutex: &Arc<Mutex<HandshakeHistory>>,
handhsake_history: Arc<HandshakeHistory>,
senders: &MultipleSenders,
) -> Option<Vec<u8>> {
let mut handhsake_history = handhsake_history_mutex.lock().unwrap();
let cmd_tx_clone = cmd_tx.clone();
let id_bytes: [u8; 4] = received_message[0..ID]
@@ -148,12 +147,8 @@ pub fn parse_message(
.block_on(get_peer_key(&username))
.expect("failed to retrieve public key"),
};
match msgtype {
HELLOREPLY => {
handhsake_history.add_new_handshake(peer_pubkey, "".to_string(), ip);
}
_ => {}
}
println!("handshake JULIUS added");
handhsake_history.add_new_handshake(peer_pubkey, "".to_string(), ip);
let signature: [u8; SIGNATURE] = received_message
[LENGTH + msg_length..LENGTH + msg_length + SIGNATURE]
.try_into()
@@ -194,14 +189,10 @@ pub fn parse_message(
// Message handling
let mut constructed_message: Option<Vec<u8>> = None;
match msgtype {
// PING
//
// envoie un OK
PING => {
constructed_message = construct_message(OK, Vec::new(), id, crypto_pair);
}
//
// OK
OK => {
let mut guard = messages_list.lock().unwrap();
let res = guard.get(&id);
@@ -216,9 +207,7 @@ pub fn parse_message(
}
}
}
//
// rien ?
// si NATTRAVERSALREQUEST alors
NATTRAVERSALREQUEST => {
// send ok & send nattraversalrequest2 to peer
constructed_message = construct_message(OK, Vec::new(), id, crypto_pair);
@@ -277,10 +266,7 @@ pub fn parse_message(
);
constructed_message = None;
}
//
// ERROR
//
// affiche un msg d'erreur
ERROR => {
if let Ok(err_received) =
String::from_utf8(received_message[LENGTH..(msg_length + LENGTH)].to_vec())
@@ -292,10 +278,7 @@ pub fn parse_message(
let _ = cmd_tx_clone.send(NetworkEvent::Error(err_msg));
}
}
// HELLO
//
// envoie une hello reply
//
HELLO => {
let mut payload = Vec::new();
@@ -318,10 +301,7 @@ pub fn parse_message(
return helloreply;
}
// HELLOREPLY
//
//
// ajoute a la liste des peers handshake
HELLOREPLY => {
// ajoute l'username a la liste des peers handshake
let received_length = u16::from_be_bytes(
@@ -365,13 +345,6 @@ pub fn parse_message(
None => {}
}
}
//
// ROOTREQUEST
//
// envoie un root reply
//
// ROOTREPLY
//
ROOTREPLY => {
// recuperer le pseudo du peers ayant repondu
let peers_exist = handhsake_history.get_peer_info_ip(ip.to_string());
@@ -422,16 +395,6 @@ pub fn parse_message(
}
}
}
//
// DATUMREQUEST
//
// envoie le datum
//
// NODATUM
//
// affiche un msg d'erreur
//
// DATUM
DATUM => {
let mut guard = messages_list.lock().expect("Échec du verrouillage");
let res = guard.get(&id);
@@ -461,65 +424,6 @@ pub fn parse_message(
None => {}
}
}
// parcourt le directory recu ou le big directory et renvoie une DATUMREQUEST pour chaque
// directory ou big directory lu
//
// NATTRAVERSALREQUEST
//
// repond OK et envoie un NATTRAVERSALREQUEST2 au pair B
//
// NATTRAVERSALREQUEST2
//
// envoie OK à S puis envoie un ping à S
// PING
//
// envoie un OK
//
// OK
//
// si NATTRAVERSALREQUEST alors
//
// ERROR
//
// affiche un msg d'erreur
//
// HELLO
//
// envoie une hello reply
//
// HELLOREPLY
//
// envoie un root request
//
// ROOTREQUEST
//
// envoie un root reply
//
// ROOTREPLY
//
// envoie un datum request
//
// DATUMREQUEST
//
// envoie le datum
//
// NODATUM
//
// affiche un msg d'erreur
//
// DATUM
//
// parcourt le directory recu ou le big directory et renvoie une DATUMREQUEST pour chaque
// directory ou big directory lu
//
// NATTRAVERSALREQUEST
//
// repond OK et envoie un NATTRAVERSALREQUEST2 au pair B
//
// NATTRAVERSALREQUEST2
//
// envoie OK à S puis envoie un ping à S
_ => return None,
};
constructed_message

View File

@@ -7,6 +7,7 @@ use crate::message_handling::EventType;
use crate::message_handling::handle_recevied_message;
use crate::peers_refresh::HandshakeHistory;
use crate::threads_handling::Worker;
use std::clone;
use std::collections::{HashMap, HashSet};
use std::hash::Hash;
use std::net::SocketAddr;
@@ -261,15 +262,13 @@ pub fn start_retry_thread(
pub fn start_receving_thread(
shared_data: &mut P2PSharedData,
cmd_tx: crossbeam_channel::Sender<NetworkEvent>,
handshake_history: &Arc<Mutex<HandshakeHistory>>,
handshake_history: Arc<HandshakeHistory>,
) {
let sock_clone = shared_data.socket();
let cryptopair_clone = shared_data.cryptopair();
let senders_clone = shared_data.senders();
let messages_clone = shared_data.messages_list();
let servername_clone = shared_data.servername();
let handshake_clone = handshake_history.clone();
let thread = thread::spawn(move || {
let mut buf = [0u8; 1024];
loop {
@@ -286,7 +285,7 @@ pub fn start_receving_thread(
&servername_clone,
cmd_tx.clone(),
src,
&handshake_clone,
handshake_history.clone(),
);
}
Err(e) => eprintln!("Erreur de réception: {}", e),

View File

@@ -1,6 +1,8 @@
// this class consists of a thread that will re send pings every time the first element
// of the stack is at the correct unix time
pub use crate::message_handling::*;
use std::{
collections::{HashMap, VecDeque},
net::{AddrParseError, Ipv4Addr, SocketAddr},
@@ -11,7 +13,10 @@ use std::{
time::{self, Duration, SystemTime},
};
use crate::{NetworkEvent, threads_handling::Worker};
use crate::{
NetworkEvent, cryptographic_signature::CryptographicSignature,
messages_channels::MultipleSenders, threads_handling::Worker,
};
use crate::{
P2PSharedData, construct_message, generate_id, messages_structure,
registration::perform_handshake,
@@ -26,59 +31,35 @@ pub struct PeerInfo {
pub ip: SocketAddr,
}
#[derive(Debug, Clone)]
pub struct HandshakeHistory {
//time_k_ip_v: HashMap<u64, u64>,
username_k_peerinfo_v: HashMap<String, PeerInfo>,
ip_k_peerinfo_v: HashMap<String, PeerInfo>,
pub username_k_peerinfo_v: Arc<Mutex<HashMap<String, PeerInfo>>>,
ip_k_peerinfo_v: Arc<Mutex<HashMap<String, PeerInfo>>>,
}
impl HandshakeHistory {
pub fn new() -> HandshakeHistory {
HandshakeHistory {
//time_k_ip_v: HashMap::new(),
//ip_k_peerinfo_v: HashMap::new(),
username_k_peerinfo_v: HashMap::new(),
ip_k_peerinfo_v: HashMap::new(),
username_k_peerinfo_v: Arc::new(Mutex::new(HashMap::new())),
ip_k_peerinfo_v: Arc::new(Mutex::new(HashMap::new())),
}
}
/*pub fn update_handshake(&self) {
let hashmap_shared = Arc::new(self.username_k_peerinfo_v);
thread::spawn(move || {
let selfhashmap = hashmap_shared.clone();
loop {
for peer in selfhashmap.keys() {
let peer_ip = selfhashmap.get(peer);
// send ping
}
let mut child = Command::new("sleep").arg("10").spawn().unwrap();
let _result = child.wait().unwrap();
}
});
}*/
pub fn get_peer_info_username(&self, username: String) -> Option<PeerInfo> {
//self.username_k_peerinfo_v.get(&username).clone()
pub fn get_peer_info_username(&self, username: String) -> Option<&PeerInfo> {
self.username_k_peerinfo_v.get(&username).clone()
let guard = self.username_k_peerinfo_v.lock().unwrap();
guard.get(&username).cloned()
}
pub fn get_peer_info_ip(&self, ip: String) -> Option<&PeerInfo> {
self.ip_k_peerinfo_v.get(&ip).clone()
pub fn get_peer_info_ip(&self, ip: String) -> Option<PeerInfo> {
let guard = self.ip_k_peerinfo_v.lock().unwrap();
guard.get(&ip).cloned()
}
/// Spawns a background worker that periodically walks a snapshot of the
/// peer table (apparently intended as a keep-alive / ping-refresh loop).
///
/// NOTE(review): the map is cloned ONCE at spawn time, so peers added after
/// this call are never visible to the thread; and the `for` body is empty,
/// so each pass currently does nothing besides sleeping 10s. Looks like
/// placeholder/dead code — confirm before relying on it.
pub fn update_handshake(&self) -> Worker {
let map_clone: Arc<HashMap<String, PeerInfo>> =
Arc::new(self.username_k_peerinfo_v.clone());
let map_for_thread = Arc::clone(&map_clone);
let handle = thread::spawn(move || {
loop {
// Empty body: iterates the stale snapshot without doing any work.
for (peer, peerinfo) in map_for_thread.iter() {}
thread::sleep(Duration::from_secs(10));
}
});
Worker::spawn(handle, crate::threads_handling::WorkerType::PING)
}
pub fn update_peer_info(&mut self, ip: String, username: String) {
pub fn update_peer_info(&self, ip: String, username: String) {
let peerinfo = self.get_peer_info_ip(ip.clone());
match peerinfo {
Some(peer_info) => match ip.parse::<SocketAddr>() {
@@ -88,8 +69,18 @@ impl HandshakeHistory {
pubkey: peer_info.pubkey,
ip: addr,
};
self.ip_k_peerinfo_v.insert(ip, new_peer_info.clone());
self.username_k_peerinfo_v.insert(username, new_peer_info);
let mut guardb = self.ip_k_peerinfo_v.lock().unwrap();
guardb.insert(ip.to_string(), new_peer_info.clone());
let mut guard = self.username_k_peerinfo_v.lock().unwrap();
guard.insert(username.to_string(), new_peer_info);
println!(
"handshake added: {}, {}, {}",
username.to_string(),
ip.to_string(),
guard.len(),
);
}
Err(e) => eprintln!("parse error: {}", e),
},
@@ -99,43 +90,56 @@ impl HandshakeHistory {
}
}
pub fn add_new_handshake(&mut self, hash: VerifyingKey, username: String, ip: SocketAddr) {
pub fn add_new_handshake(&self, hash: VerifyingKey, username: String, ip: SocketAddr) {
let peerinfo = PeerInfo {
username: username.clone(),
pubkey: hash,
ip,
};
self.username_k_peerinfo_v
.insert(username, peerinfo.clone());
self.ip_k_peerinfo_v
.insert(ip.to_string(), peerinfo.clone());
let mut guard = self.username_k_peerinfo_v.lock().unwrap();
guard.insert(username, peerinfo.clone());
let mut guardb = self.ip_k_peerinfo_v.lock().unwrap();
guardb.insert(ip.to_string(), peerinfo.clone());
}
}
pub fn perform_discover(
username: String,
hash: String,
sd: &P2PSharedData,
server_ip: String,
event_tx: Sender<NetworkEvent>,
) {
// first, sends handshake
if hash == "root" {
perform_handshake(sd, username, server_ip, event_tx, false);
/*if let Some(data) = construct_message(
messages_structure::ROOTREQUEST,
Vec::new(),
generate_id(),
sd.cryptopair_ref(),
) {
if let Some(peerinfo) = sd.handshake_ref() {
sd.senders_ref()
.send_via(0, data, peerinfo.ip.to_string(), false);
pub fn update_handshake(
senders: Arc<MultipleSenders>,
crypto_pair: Arc<CryptographicSignature>,
messages_list: Arc<Mutex<HashMap<i32, EventType>>>,
username_k_peerinfo_v: Arc<Mutex<HashMap<String, PeerInfo>>>,
) -> Worker {
let map_for_thread = username_k_peerinfo_v.clone();
let handle = thread::spawn(move || {
loop {
println!("loop boucle");
let guard = map_for_thread.lock().unwrap();
println!("len:{}", guard.len());
for (peer, peerinfo) in guard.iter() {
let id = generate_id();
let mut map = messages_list.lock().unwrap();
map.insert(id, EventType::Ping);
drop(map);
let pingrequest = construct_message(PING, Vec::new(), id, &crypto_pair);
if let Some(ping) = pingrequest {
senders.add_message_to_retry_queue(
ping.clone(),
peerinfo.ip.to_string(),
false,
);
senders.send_dispatch(
ping,
peerinfo.ip.to_string(),
false,
messages_list.clone(),
);
println!("ping envoye a {}", peer);
}
}
}*/
} else {
// envoyer un datum request
}
thread::sleep(Duration::from_secs(2));
}
});
Worker::spawn(handle, crate::threads_handling::WorkerType::PING)
}
#[cfg(test)]

View File

@@ -71,6 +71,12 @@ pub async fn perform_handshake(
payload.extend_from_slice(&0u32.to_be_bytes());
payload.extend_from_slice(&crypto_pair.username.clone().as_bytes());
let hello_handshake = construct_message(1, payload, id, crypto_pair);
if is_server_handshake {
sd.add_message(id, EventType::Hello);
} else {
sd.add_message(id, EventType::HelloThenRootRequest);
}
match hello_handshake {
Some(handshake_message) => {
senders.send_dispatch(