Merge pull request #6 from lbennett-stacki/checksum
feat: impl checksum
lbennett-stacki authored Jul 8, 2024
2 parents a1e377c + f7adfd5 commit 98a20d9
Showing 8 changed files with 75 additions and 52 deletions.
20 changes: 10 additions & 10 deletions cloud/leptos/Cargo.toml
@@ -24,15 +24,15 @@ http = "1"
[features]
hydrate = ["leptos/hydrate", "leptos_meta/hydrate", "leptos_router/hydrate"]
ssr = [
"dep:axum",
"dep:tokio",
"dep:tower",
"dep:tower-http",
"dep:leptos_axum",
"leptos/ssr",
"leptos_meta/ssr",
"leptos_router/ssr",
"dep:tracing",
"dep:axum",
"dep:tokio",
"dep:tower",
"dep:tower-http",
"dep:leptos_axum",
"leptos/ssr",
"leptos_meta/ssr",
"leptos_router/ssr",
"dep:tracing",
]

# Defines a size-optimized profile for the WASM bundle in release mode
@@ -41,7 +41,7 @@ ssr = [
# The name used by wasm-bindgen/cargo-leptos for the JS/WASM bundle. Defaults to the crate name
output-name = "leptos"

# The site root folder is where cargo-leptos generate all output. WARNING: all content of this folder will be erased on a rebuild. Use it in your server setup.
# The site root folder is where cargo-leptos generate all output.
site-root = "target/site"

# The site-root relative folder where all compiled output (JS, WASM and CSS) is written
3 changes: 0 additions & 3 deletions packages/providers/core/lib/README.md
@@ -39,7 +39,6 @@ Majors only. e.g. 0x01, 0x02, 0x03
16-bit ones' complement of the header and payload.
The checksum is initially set to 0 for checksum calculation.

1. Decode payload as bytes
1. Generate a header version, opcode, 0x0 (nil checksum), payload length
1. Group by 16 bit words
1. Pad 0's to complete words
@@ -57,5 +56,3 @@ The checksum is initially set to 0 for checksum calculation.
1. Wrap overflows
1. Add checksum
1. All 1's, else error

TODO: double check
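The README steps above describe the wire checksum: fold the header (with its checksum field held at 0x0) and the payload into big-endian 16-bit words, pad the final word with zeros, and let overflows wrap. A minimal standalone sketch of that walk, mirroring the wrapping 16-bit sum used in protocol.rs further down this diff; the function name here is illustrative only:

```rust
/// Sketch only: sum the header (checksum field set to 0x0000) and payload
/// as big-endian 16-bit words, padding the trailing odd byte with 0x00.
fn checksum_sketch(header_with_nil_checksum: &[u8], payload: &[u8]) -> u16 {
    let bytes: Vec<u8> = header_with_nil_checksum
        .iter()
        .chain(payload.iter())
        .copied()
        .collect();

    bytes.chunks(2).fold(0u16, |sum, chunk| {
        // Pad 0's to complete the final 16-bit word when the byte count is odd.
        let word = u16::from_be_bytes([chunk[0], *chunk.get(1).unwrap_or(&0)]);
        // Wrap overflows instead of carrying or erroring.
        sum.wrapping_add(word)
    })
}
```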
1 change: 0 additions & 1 deletion packages/providers/core/lib/src/errors.rs
@@ -6,7 +6,6 @@ pub enum ServerError {
Read,
NoMessages,
SocketError,
// TODO: not error?
WouldBlock,
}

25 changes: 5 additions & 20 deletions packages/providers/core/lib/src/handler.rs
@@ -30,24 +30,6 @@ impl Handler {
// "Actions" are also processed in their own green thread to allow for actions of varying
// durations to be processed concurrently without blocking queue I/O.
pub async fn handle(self: Arc<Self>) -> Result<ServerResult, ServerError> {
// TODO: think about whether or not we need to authenticate/authorize
// new clients in any way. It will probably go here.
// Had a thought today that the root problem is how to share a secret with both
// the client app and the nv unix system. I thought about setting at compile/build time for
// both but I feel like its essentially the same as saving to FS just w/ obfuscation.
// We need a dynamic way to auth a client with server, that the server is actually the
// inteded app. Maybe look into code signing for inspo?
// .......
// Lots more thinking today. Another idea...
// We leave most of the auth to the providers. e.g. locally setting AWS creds in env vars
// and conforming to AWS idea of identity on their machines.
// Another potential to maybe harden is to write some secret token at startup or build time
// that is consumed by the server and client once. This means only the first claim of the
// container will have access for the rest of the duration of the container?
// Also, we could ignore all the above and have some other agent/server that is
// completely separate and private. This would essentially be stepping into the role of the providers
// to some degree. For example, hashicorpt vault is secret management server that auths and comms with clients.

let (req_tx, req_rx) = mpsc::channel(16);
let (res_tx, res_rx) = mpsc::channel(16);
let cancel_token = CancellationToken::new();
@@ -171,7 +153,7 @@ impl Handler {

match result {
Err(ServerError::WouldBlock) => {
// TODO: test that this is required
// Re-queue received message if blocking
let _ = action_out_recover.send(message).await;
continue;
}
@@ -237,7 +219,10 @@ impl Handler {
res: Result<Message, ProviderError>,
) -> Result<(), ServerError> {
let message = res.unwrap_or("nout".as_bytes().into());
let serialized = self.serializer.serialize(message);
let serialized = self
.serializer
.serialize(message)
.map_err(|_| ServerError::Write)?;

match stream.try_write(&serialized) {
Ok(_) => Ok(()),
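The comment kept at the top of Handler::handle notes that each "action" runs on its own green thread so actions of varying durations never block queue I/O, with bounded mpsc channels and a CancellationToken wiring the loop together. A rough sketch of that shape under those assumptions — the Message alias, run_action, and action_loop names are illustrative stand-ins, not the crate's API:

```rust
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;

// Illustrative stand-in; the crate's real Message is a byte payload.
type Message = Vec<u8>;

async fn run_action(msg: Message) -> Message {
    // A provider action of arbitrary duration would run here.
    msg
}

// Each queued action gets its own task, so a long-running action never
// blocks the loop that drains the request channel.
async fn action_loop(
    mut req_rx: mpsc::Receiver<Message>,
    res_tx: mpsc::Sender<Message>,
    cancel: CancellationToken,
) {
    loop {
        tokio::select! {
            _ = cancel.cancelled() => break,
            maybe_msg = req_rx.recv() => match maybe_msg {
                Some(msg) => {
                    let res_tx = res_tx.clone();
                    tokio::spawn(async move {
                        let out = run_action(msg).await;
                        let _ = res_tx.send(out).await;
                    });
                }
                None => break, // request channel closed
            },
        }
    }
}
```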
70 changes: 56 additions & 14 deletions packages/providers/core/lib/src/protocol.rs
@@ -33,7 +33,6 @@ impl Opcode {

fn from_byte(byte: u8) -> Result<Opcode, ProtocolError> {
log::debug!("Converting opcode byte {} to opcode", byte);
// TODO: has to be a better way, also see similar conversions to improve in LSP, lexer and parser
match byte {
0x1 => Ok(Opcode::Initialize),
0x2 => Ok(Opcode::Destroy),
@@ -85,24 +84,23 @@ impl Header {
fn from_message(message: MessageSlice) -> Result<Header, ProtocolError> {
let version = *message.first().ok_or(ProtocolError::InvalidHeader)?;
log::debug!("Header from message, version:{:?}", version);

let opcode = *message.get(1).ok_or(ProtocolError::InvalidHeader)?;
log::debug!("Header from message, opcode:{:?}", opcode);

let checksum: [u8; 2] = message
.get(2..=3)
.ok_or(ProtocolError::InvalidHeader)?
.try_into()
.or(Err(ProtocolError::InvalidHeader))?;
log::debug!("Header from message, checksum:{:?}", checksum);

let payload_length: [u8; 4] = message
.get(4..=7)
.ok_or(ProtocolError::InvalidHeader)?
.try_into()
.or(Err(ProtocolError::InvalidHeader))?;
log::debug!("Header from message, payload_length:{:?}", payload_length);
log::debug!(
"Header from message, payload_length AS BE BYTES:{:?}",
u32::from_be_bytes(payload_length)
);

Ok(Header {
version,
@@ -114,23 +112,66 @@ impl Header {
}

impl MessageSerializer {
pub fn serialize(&self, payload: Payload) -> Message {
pub fn serialize(&self, payload: Payload) -> Result<Message, ProtocolError> {
log::debug!("Serializing payload {:?}", payload);

let header = self.generate_header(&payload);
let header = MessageSerializer::generate_header(&payload)?;

header.iter().chain(payload.iter()).copied().collect()
Ok(header.iter().chain(payload.iter()).copied().collect())
}

fn generate_header(&self, payload: &Payload) -> Vec<u8> {
let header = Header {
version: 0x0, // TODO: get from git tags at compile time??
fn generate_header(payload: &Payload) -> Result<Vec<u8>, ProtocolError> {
let mut header = Header {
version: 0x1,
opcode: Opcode::GetValue,
checksum: 0x0,
payload_length: payload.len() as u32, // TODO: try from
payload_length: payload
.len()
.try_into()
.map_err(|_| ProtocolError::InvalidPayload)?,
};

header.to_bytes()
let header_bytes = header.to_bytes();

header.checksum = MessageSerializer::generate_checksum(&header_bytes, payload)?;

Ok(header.to_bytes())
}

fn generate_checksum(header_bytes: &[u8], payload: &Payload) -> Result<u16, ProtocolError> {
let mut res: u16 = 0;

let all_bytes: Vec<u8> = header_bytes.iter().chain(payload.iter()).copied().collect();

for chunk in all_bytes.chunks(2) {
let pad_count = 2 - chunk.len();

let mut padding = vec![0; pad_count];
padding.fill(0);

let word: Vec<_> = chunk.iter().chain(padding.iter()).copied().collect();
let word: [u8; 2] = word
.get(0..=1)
.ok_or(ProtocolError::UngeneratableChecksum)?
.try_into()
.or(Err(ProtocolError::UngeneratableChecksum))?;
let word = u16::from_be_bytes(word);

res = res.wrapping_add(word);
}

Ok(res)
}

fn verify_checksum(message: Message) -> Result<(), ProtocolError> {
let header = message.get(0..=7).ok_or(ProtocolError::CorruptChecksum)?;
let payload = message.get(8..);

let checksum = header.get(1..=3);

// TODO: you must perform error detection on checksum to complete

Ok(())
}
}

@@ -141,6 +182,8 @@ pub enum ProtocolError {
InvalidPayload,
UnreadableStream,
InvalidOpcode,
UngeneratableChecksum,
CorruptChecksum,
}

pub struct MessageDeserializer {}
@@ -151,7 +194,6 @@ pub struct DeserializedMessage {
}

impl MessageDeserializer {
// TODO: use a struct to return instead of tuple
pub fn deserialize(message: MessageSlice) -> Result<DeserializedMessage, ProtocolError> {
log::debug!("Deserialize on message called: {:?}", message);

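verify_checksum above still carries a TODO for the actual error detection. One way it could be completed, assuming the same scheme as generate_checksum: zero out the stored checksum bytes (positions 2..=3, as read by Header::from_message), recompute the wrapping 16-bit sum, and compare. This reuses the hypothetical checksum_sketch helper from the README section earlier and is a sketch under that assumption, not the commit's final behaviour:

```rust
fn verify_checksum_sketch(message: &[u8]) -> Result<(), ProtocolError> {
    let header = message.get(0..=7).ok_or(ProtocolError::CorruptChecksum)?;
    let payload = message.get(8..).unwrap_or(&[]);

    // Stored checksum lives in header bytes 2..=3 (big-endian),
    // matching Header::from_message above.
    let stored = u16::from_be_bytes([header[2], header[3]]);

    // Recompute over the header with the checksum field zeroed out.
    let mut nil_header = header.to_vec();
    nil_header[2] = 0;
    nil_header[3] = 0;

    let computed = checksum_sketch(&nil_header, payload);

    if computed == stored {
        Ok(())
    } else {
        Err(ProtocolError::CorruptChecksum)
    }
}
```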
5 changes: 3 additions & 2 deletions packages/providers/core/lib/src/server.rs
@@ -111,7 +111,6 @@ mod tests {
tokio::spawn(async move {
let _ = server.start().await;
});
// TODO: no sleepy, receive signal that server is ready instead
sleep(Duration::from_millis(100)).await;

let client = Arc::new(Mutex::new(UnixStream::connect(path).await.unwrap()));
@@ -137,11 +136,13 @@

let header = res.header;
let payload = res.payload;
assert_eq!(header.version, 0x0);
assert_eq!(header.version, 0x1);
assert_eq!(header.opcode, Opcode::GetValue);
assert_snapshot!(header.checksum);
assert_eq!(header.payload_length, 4);
assert_eq!(String::from_utf8(payload), Ok("nout".to_owned()));

// TODO: verify checksum
}

Err(err) => {
@@ -2,4 +2,4 @@
source: packages/providers/core/lib/src/server.rs
expression: header.checksum
---
0
58346
1 change: 0 additions & 1 deletion packages/providers/providers/env/lib/src/provider.rs
Expand Up @@ -21,7 +21,6 @@ impl Provider for EnvProvider {

match error {
env::VarError::NotPresent => ProviderError::NoValueForKey,
// TODO: do it properly
_ => ProviderError::ExplodeyProvider,
}
});