From 4ba7ee816bb82d5ee7e80ed19f76735e0806db48 Mon Sep 17 00:00:00 2001
From: Jeremy Wall <Jeremy@marzhilsltudios.com>
Date: Sat, 6 Aug 2022 20:16:08 -0400
Subject: [PATCH] LevelDB store implementation

---
 Cargo.toml         |  21 ++++-
 src/blake2.rs      |  39 +++++++
 src/dag.rs         |  59 ++++++-------
 src/hash.rs        |  24 +----
 src/leveldb/mod.rs |  75 ++++++++++++++
 src/lib.rs         |   4 +
 src/node.rs        | 213 ++++++++++++++++++++++++++++++++++++++++++---
 src/proptest.rs    |   8 +-
 src/store.rs       |  32 +++----
 src/test.rs        |  28 +++---
 10 files changed, 401 insertions(+), 102 deletions(-)
 create mode 100644 src/blake2.rs
 create mode 100644 src/leveldb/mod.rs

diff --git a/Cargo.toml b/Cargo.toml
index 95de544..76595dc 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -7,11 +7,28 @@
 license = "Apache License 2.0"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
-[dependencies]
+[dependencies.ciborium]
+version = "0.2.0"
+optional = true
+
+[dependencies.serde]
+version = "1.0.144"
+features = ["derive"]
 
 [dependencies.proptest]
 version = "1.0.0"
 optional = true
 
+[dependencies.rusty-leveldb]
+version = "= 1.0.4"
+optional = true
+
+[dependencies.blake2]
+version = "0.10.4"
+optional = true
+
 [features]
-default = []
\ No newline at end of file
+default = []
+cbor = ["dep:ciborium"]
+blake2 = ["dep:blake2"]
+rusty-leveldb = ["dep:rusty-leveldb", "blake2", "cbor"]
diff --git a/src/blake2.rs b/src/blake2.rs
new file mode 100644
index 0000000..63a43df
--- /dev/null
+++ b/src/blake2.rs
@@ -0,0 +1,39 @@
+// Copyright 2022 Jeremy Wall (Jeremy@marzhilsltudios.com)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+use crate::hash::*;
+use blake2::digest::Digest;
+pub use blake2::{Blake2b512, Blake2s256};
+
+macro_rules! hash_writer_impl {
+    ($tname:ident, $size:expr) => {
+        impl HashWriter<$size> for $tname {
+            fn record<I: Iterator<Item = u8>>(&mut self, bs: I) {
+                let vec: Vec<u8> = bs.collect();
+                self.update(&vec);
+            }
+
+            fn hash(&self) -> [u8; $size] {
+                let mut out: [u8; $size] = [0; $size];
+                // This is gross but Blake2 doesn't support a non-consuming
+                // finalize, so clone the hasher state before finalizing.
+                let mut arr = self.clone().finalize();
+                arr.swap_with_slice(&mut out);
+                out
+            }
+        }
+    };
+}
+
+// The LEN parameter must match each hasher's true digest width (64 and 32
+// bytes) or the swap_with_slice call above panics at runtime.
+hash_writer_impl!(Blake2b512, 64);
+hash_writer_impl!(Blake2s256, 32);
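For context, this is how the new hashers are meant to be driven through the HashWriter trait (amended in src/hash.rs below). A minimal sketch, with the crate name merkle-dag assumed since the package name does not appear in this patch:

    use merkle_dag::blake2::Blake2b512; // crate name assumed
    use merkle_dag::hash::HashWriter;

    let mut hw = Blake2b512::default();
    hw.record("hello world".bytes()); // record() accepts any Iterator<Item = u8>
    let digest: [u8; 64] = hw.hash(); // 64 bytes wide, matching HashWriter<64>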
diff --git a/src/dag.rs b/src/dag.rs
index 7804369..d700b6c 100644
--- a/src/dag.rs
+++ b/src/dag.rs
@@ -15,9 +15,9 @@
 use std::{collections::BTreeSet, marker::PhantomData};
 
 use crate::{
-    hash::{ByteEncoder, HashWriter},
+    hash::HashWriter,
     node::Node,
-    store::{Store, StoreError},
+    store::{Result, Store, StoreError},
 };
 
 /// Node comparison values. In a given Merkle DAG a Node can come `After` or `Before` another, be `Equivalent`, or be `Uncomparable`.
@@ -44,22 +44,20 @@ pub enum NodeCompare {
 /// A merkle DAG instance is tied to a specific implementation of the HashWriter interface to ensure
 /// that all hash identifiers are of the same hash algorithm.
 #[derive(Clone, Debug)]
-pub struct Merkle<S, N, HW, const HASH_LEN: usize>
+pub struct Merkle<S, HW, const HASH_LEN: usize>
 where
-    N: ByteEncoder,
     HW: HashWriter<HASH_LEN>,
-    S: Store<N, HW, HASH_LEN>,
+    S: Store<HW, HASH_LEN>,
 {
     roots: BTreeSet<[u8; HASH_LEN]>,
     nodes: S,
-    _phantom_node: PhantomData<Node<N, HW, HASH_LEN>>,
+    _phantom_node: PhantomData<Node<HW, HASH_LEN>>,
 }
 
-impl<S, N, HW, const HASH_LEN: usize> Merkle<S, N, HW, HASH_LEN>
+impl<S, HW, const HASH_LEN: usize> Merkle<S, HW, HASH_LEN>
 where
-    N: ByteEncoder,
     HW: HashWriter<HASH_LEN>,
-    S: Store<N, HW, HASH_LEN>,
+    S: Store<HW, HASH_LEN>,
 {
     /// Construct a new empty DAG. The empty DAG is also the default for a DAG.
     pub fn new() -> Self {
@@ -72,20 +70,20 @@ where
     ///
     /// One result of not constructing/adding nodes in this way is that we ensure that we always satisfy
     /// the implementation rule in the merkle-crdt whitepaper.
-    pub fn add_node<'a>(
+    pub fn add_node<'a, N: Into<Vec<u8>>>(
         &'a mut self,
         item: N,
         dependency_ids: BTreeSet<[u8; HASH_LEN]>,
-    ) -> Result<[u8; HASH_LEN], StoreError> {
-        let node = Node::<N, HW, HASH_LEN>::new(item, dependency_ids.clone());
+    ) -> Result<[u8; HASH_LEN]> {
+        let node = Node::<HW, HASH_LEN>::new(item.into(), dependency_ids.clone());
         let id = node.id().clone();
-        if self.nodes.contains(&id) {
+        if self.nodes.contains(&id)? {
             // We've already added this node so there is nothing left to do.
             return Ok(id);
         }
         let mut root_removals = Vec::new();
         for dep_id in dependency_ids.iter() {
-            if !self.nodes.contains(dep_id) {
+            if !self.nodes.contains(dep_id)? {
                 return Err(StoreError::NoSuchDependents);
             }
             // If any of our dependencies is in the roots pointer list then
@@ -103,12 +101,12 @@ where
     }
 
     /// Check if we already have a copy of a node.
-    pub fn check_for_node(&self, id: &[u8; HASH_LEN]) -> bool {
+    pub fn check_for_node(&self, id: &[u8; HASH_LEN]) -> Result<bool> {
         return self.nodes.contains(id);
     }
 
     /// Get a node from the DAG by its hash identifier if it exists.
-    pub fn get_node_by_id(&self, id: &[u8; HASH_LEN]) -> Option<&Node<N, HW, HASH_LEN>> {
+    pub fn get_node_by_id(&self, id: &[u8; HASH_LEN]) -> Result<Option<Node<HW, HASH_LEN>>> {
         self.nodes.get(id)
     }
 
@@ -127,30 +125,30 @@ where
    /// then returns `NodeCompare::After`. If both ids are equal then returns
     /// `NodeCompare::Equivalent`. If neither id is part of the same subgraph then returns
     /// `NodeCompare::Uncomparable`.
-    pub fn compare(&self, left: &[u8; HASH_LEN], right: &[u8; HASH_LEN]) -> NodeCompare {
-        if left == right {
+    pub fn compare(&self, left: &[u8; HASH_LEN], right: &[u8; HASH_LEN]) -> Result<NodeCompare> {
+        Ok(if left == right {
             NodeCompare::Equivalent
         } else {
             // Is left node an ancestor of right node?
-            if self.search_graph(right, left) {
+            if self.search_graph(right, left)? {
                 NodeCompare::Before
             // Is right node an ancestor of left node?
-            } else if self.search_graph(left, right) {
+            } else if self.search_graph(left, right)? {
                 NodeCompare::After
             } else {
                 NodeCompare::Uncomparable
             }
-        }
+        })
     }
 
-    fn search_graph(&self, root_id: &[u8; HASH_LEN], search_id: &[u8; HASH_LEN]) -> bool {
+    fn search_graph(&self, root_id: &[u8; HASH_LEN], search_id: &[u8; HASH_LEN]) -> Result<bool> {
         if root_id == search_id {
-            return true;
+            return Ok(true);
         }
-        let root_node = match self.get_node_by_id(root_id) {
+        let root_node = match self.get_node_by_id(root_id)? {
             Some(n) => n,
             None => {
-                return false;
+                return Ok(false);
             }
         };
         let mut stack = vec![root_node];
@@ -159,23 +157,22 @@ where
             let deps = node.dependency_ids();
             for dep in deps {
                 if search_id == dep {
-                    return true;
+                    return Ok(true);
                 }
-                stack.push(match self.get_node_by_id(dep) {
+                stack.push(match self.get_node_by_id(dep)?
 {
                     Some(n) => n,
                     None => panic!("Invalid DAG STATE encountered"),
                 })
             }
         }
-        return false;
+        return Ok(false);
     }
 }
 
-impl<S, N, HW, const HASH_LEN: usize> Default for Merkle<S, N, HW, HASH_LEN>
+impl<S, HW, const HASH_LEN: usize> Default for Merkle<S, HW, HASH_LEN>
 where
-    N: ByteEncoder,
     HW: HashWriter<HASH_LEN>,
-    S: Store<N, HW, HASH_LEN>,
+    S: Store<HW, HASH_LEN>,
 {
     fn default() -> Self {
         Self {
diff --git a/src/hash.rs b/src/hash.rs
index 9e94565..a497b46 100644
--- a/src/hash.rs
+++ b/src/hash.rs
@@ -11,15 +11,9 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-
+use std::collections::hash_map::DefaultHasher;
 use std::hash::Hasher;
 
-/// Utility Trait to specify that payloads must be serializable into bytes.
-pub trait ByteEncoder {
-    /// Serialize self into bytes.
-    fn bytes(&self) -> Vec<u8>;
-}
-
 /// Utility Trait to specify the hashing algorithm and provide a common
 /// interface for that algorithm to provide. This interface is expected to
 /// be stateful.
@@ -28,15 +22,10 @@ pub trait HashWriter<const LEN: usize>: Default {
     fn record<I: Iterator<Item = u8>>(&mut self, bs: I);
 
     /// Provide the current hash value based on the bytes that have so far been recorded.
-    /// It is expected that you can call this method multiple times while recording the
-    /// the bytes for input into the hash.
     fn hash(&self) -> [u8; LEN];
 }
 
-impl<H> HashWriter<8> for H
-where
-    H: Hasher + Default,
-{
+impl HashWriter<8> for DefaultHasher {
     fn record<I: Iterator<Item = u8>>(&mut self, iter: I) {
         let bytes = iter.collect::<Vec<u8>>();
         self.write(bytes.as_slice());
@@ -46,12 +35,3 @@ where
         self.finish().to_le_bytes()
     }
 }
-
-impl<V> ByteEncoder for V
-where
-    V: Into<Vec<u8>> + Clone,
-{
-    fn bytes(&self) -> Vec<u8> {
-        <Self as Into<Vec<u8>>>::into(self.clone())
-    }
-}
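Narrowing the blanket `impl<H: Hasher + Default> HashWriter<8> for H` down to just DefaultHasher is presumably what keeps the new Blake2 impls from tripping the overlapping-impl (coherence) rules as more hashers gain HashWriter impls. A minimal sketch of the std-hasher path that remains, again with the merkle-dag crate name assumed:

    use merkle_dag::hash::HashWriter; // crate name assumed
    use std::collections::hash_map::DefaultHasher;

    let mut hw = DefaultHasher::default();
    hw.record("some payload".bytes());
    let id: [u8; 8] = hw.hash(); // DefaultHasher yields an 8-byte id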
diff --git a/src/leveldb/mod.rs b/src/leveldb/mod.rs
new file mode 100644
index 0000000..23a740a
--- /dev/null
+++ b/src/leveldb/mod.rs
@@ -0,0 +1,75 @@
+// Copyright 2022 Jeremy Wall (Jeremy@marzhilsltudios.com)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+use std::cell::RefCell;
+use std::path::Path;
+
+use crate::{
+    node::Node,
+    store::{Result, Store, StoreError},
+};
+
+use crate::blake2::*;
+use ciborium;
+use rusty_leveldb;
+
+// A LevelDB backing store for a Merkle-DAG. Node ids here are 64-byte
+// Blake2b512 hashes, matching the HashWriter<64> impl in src/blake2.rs.
+pub struct LevelStore {
+    store: RefCell<rusty_leveldb::DB>,
+}
+
+impl LevelStore {
+    pub fn open<P: AsRef<Path>>(path: P) -> std::result::Result<Self, StoreError> {
+        let opts = Default::default();
+        Ok(Self {
+            store: RefCell::new(rusty_leveldb::DB::open(path, opts)?),
+        })
+    }
+}
+
+impl Store<Blake2b512, 64> for LevelStore {
+    fn contains(&self, id: &[u8; 64]) -> Result<bool> {
+        Ok(self.store.borrow_mut().get(id).is_some())
+    }
+
+    fn get(&self, id: &[u8; 64]) -> Result<Option<Node<Blake2b512, 64>>> {
+        Ok(match self.store.borrow_mut().get(id) {
+            Some(bs) => ciborium::de::from_reader(bs.as_slice())
+                .map_err(|e| StoreError::StoreFailure(format!("Invalid serialization {:?}", e)))?,
+            None => None,
+        })
+    }
+
+    fn store(&mut self, node: Node<Blake2b512, 64>) -> Result<()> {
+        let mut buf = Vec::new();
+        ciborium::ser::into_writer(&node, &mut buf).unwrap();
+        self.store.borrow_mut().put(node.id(), &buf)?;
+        Ok(())
+    }
+}
+
+impl From<rusty_leveldb::Status> for StoreError {
+    fn from(status: rusty_leveldb::Status) -> Self {
+        StoreError::StoreFailure(format!("{}", status))
+    }
+}
+
+impl Default for LevelStore {
+    fn default() -> Self {
+        Self {
+            store: RefCell::new(
+                rusty_leveldb::DB::open("memory", rusty_leveldb::in_memory()).unwrap(),
+            ),
+        }
+    }
+}
diff --git a/src/lib.rs b/src/lib.rs
index dfdbbc8..8d48008 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -11,8 +11,12 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
+#[cfg(feature = "blake2")]
+pub mod blake2;
 pub mod dag;
 pub mod hash;
+#[cfg(feature = "rusty-leveldb")]
+pub mod leveldb;
 pub mod node;
 pub mod prelude;
 pub mod store;
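A downstream crate opts in to this store through the new feature flags. A sketch of a consumer's manifest, with the package name and version hypothetical:

    [dependencies.merkle-dag]   # hypothetical package name
    version = "0.1"             # hypothetical version
    # "rusty-leveldb" pulls in the "blake2" and "cbor" features transitively.
    features = ["rusty-leveldb"]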
diff --git a/src/node.rs b/src/node.rs
index 87b7383..a63e852 100644
--- a/src/node.rs
+++ b/src/node.rs
@@ -13,7 +13,9 @@
 // limitations under the License.
 use std::{collections::BTreeSet, marker::PhantomData};
 
-use crate::hash::{ByteEncoder, HashWriter};
+use serde::{de::Visitor, ser::SerializeStruct, Deserialize, Deserializer, Serialize, Serializer};
+
+use crate::hash::HashWriter;
 
 /// A node in a merkle DAG. Nodes are composed of a payload item and a set of dependency_ids.
 /// They provide a unique identifier that is formed from the bytes of the payload as well
@@ -25,32 +27,221 @@ use crate::hash::{ByteEncoder, HashWriter};
 /// Nodes are tied to a specific implementation of the HashWriter trait which is itself tied
 /// to the DAG they are stored in guaranteeing that the same Hashing implementation is used
 /// for each node in the DAG.
-#[derive(Debug, PartialEq, Clone)]
-pub struct Node<N, HW, const HASH_LEN: usize>
+#[derive(Debug, PartialEq)]
+pub struct Node<HW, const HASH_LEN: usize>
 where
-    N: ByteEncoder,
     HW: HashWriter<HASH_LEN>,
 {
     id: [u8; HASH_LEN],
-    item: N,
+    item: Vec<u8>,
     item_id: [u8; HASH_LEN],
     dependency_ids: BTreeSet<[u8; HASH_LEN]>,
     _phantom: PhantomData<HW>,
 }
 
-impl<N, HW, const HASH_LEN: usize> Node<N, HW, HASH_LEN>
+impl<HW, const HASH_LEN: usize> Clone for Node<HW, HASH_LEN>
+where
+    HW: HashWriter<HASH_LEN>,
+{
+    fn clone(&self) -> Self {
+        Self {
+            id: self.id.clone(),
+            item: self.item.clone(),
+            item_id: self.item_id.clone(),
+            dependency_ids: self.dependency_ids.clone(),
+            _phantom: PhantomData,
+        }
+    }
+}
+
+impl<HW, const HASH_LEN: usize> Serialize for Node<HW, HASH_LEN>
+where
+    HW: HashWriter<HASH_LEN>,
+{
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        let mut structor = serializer.serialize_struct("Node", 4)?;
+        structor.serialize_field("id", self.id.as_slice())?;
+        structor.serialize_field("item", &self.item)?;
+        structor.serialize_field("item_id", self.item_id.as_slice())?;
+        // Serde has no Serialize impl for const generic arrays, so encode
+        // the dependency ids as a sequence of byte slices.
+        structor.serialize_field(
+            "dependency_ids",
+            &self
+                .dependency_ids
+                .iter()
+                .map(|id| id.as_slice())
+                .collect::<Vec<&[u8]>>(),
+        )?;
+        structor.end()
+    }
+}
+
+fn coerce_array<const HASH_LEN: usize>(slice: &[u8]) -> Result<[u8; HASH_LEN], String> {
+    let mut coerced_item: [u8; HASH_LEN] = [0; HASH_LEN];
+    if slice.len() != coerced_item.len() {
+        // copy_from_slice panics on a length mismatch in either direction.
+        return Err(format!(
+            "Expected slice of length: {} but got slice of length: {}",
+            coerced_item.len(),
+            slice.len()
+        ));
+    } else {
+        coerced_item.copy_from_slice(slice);
+    }
+    Ok(coerced_item)
+}
+
+fn coerce_set<const HASH_LEN: usize>(
+    set: BTreeSet<Vec<u8>>,
+) -> Result<BTreeSet<[u8; HASH_LEN]>, String> {
+    let mut coerced_item = BTreeSet::new();
+    for slice in set {
+        coerced_item.insert(coerce_array(slice.as_slice())?);
+    }
+    Ok(coerced_item)
+}
+
+impl<'de, HW, const HASH_LEN: usize> Deserialize<'de> for Node<HW, HASH_LEN>
+where
+    HW: HashWriter<HASH_LEN>,
+{
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        #[derive(Deserialize)]
+        #[serde(field_identifier, rename_all = "lowercase")]
+        #[allow(non_camel_case_types)]
+        enum Field {
+            Id,
+            Item,
+            Item_Id,
+            Dependency_Ids,
+        }
+
+        struct NodeVisitor<HW, const HASH_LEN: usize>(PhantomData<HW>);
+
+        impl<'de, HW, const HASH_LEN: usize> Visitor<'de> for NodeVisitor<HW, HASH_LEN>
+        where
+            HW: HashWriter<HASH_LEN>,
+        {
+            type Value = Node<HW, HASH_LEN>;
+
+            fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
+                formatter.write_str("struct Node")
+            }
+
+            fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+            where
+                A: serde::de::SeqAccess<'de>,
+            {
+                // We deserialize owned Vec<u8> values here because readers
+                // handed to ciborium::de::from_reader cannot loan out
+                // borrowed &[u8] slices.
+                let id: [u8; HASH_LEN] = coerce_array(
+                    seq.next_element::<Vec<u8>>()?
+                        .ok_or_else(|| serde::de::Error::invalid_length(0, &self))?
+                        .as_slice(),
+                )
+                .map_err(serde::de::Error::custom)?;
+                let item = seq
+                    .next_element::<Vec<u8>>()?
+                    .ok_or_else(|| serde::de::Error::invalid_length(1, &self))?;
+                let item_id: [u8; HASH_LEN] = coerce_array(
+                    seq.next_element::<Vec<u8>>()?
+                        .ok_or_else(|| serde::de::Error::invalid_length(2, &self))?
+                        .as_slice(),
+                )
+                .map_err(serde::de::Error::custom)?;
+                let dependency_ids: BTreeSet<[u8; HASH_LEN]> = coerce_set(
+                    seq.next_element::<BTreeSet<Vec<u8>>>()?
+                        .ok_or_else(|| serde::de::Error::invalid_length(3, &self))?,
+                )
+                .map_err(serde::de::Error::custom)?;
+                Ok(Self::Value {
+                    id,
+                    item,
+                    item_id,
+                    dependency_ids,
+                    _phantom: PhantomData,
+                })
+            }
+
+            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
+            where
+                A: serde::de::MapAccess<'de>,
+            {
+                let mut id: Option<[u8; HASH_LEN]> = None;
+                let mut item: Option<Vec<u8>> = None;
+                let mut item_id: Option<[u8; HASH_LEN]> = None;
+                let mut dependency_ids: Option<BTreeSet<[u8; HASH_LEN]>> = None;
+                while let Some(key) = map.next_key()? {
+                    match key {
+                        Field::Id => {
+                            if id.is_some() {
+                                return Err(serde::de::Error::duplicate_field("id"));
+                            } else {
+                                id = Some(
+                                    coerce_array(map.next_value::<Vec<u8>>()?.as_slice())
+                                        .map_err(serde::de::Error::custom)?,
+                                );
+                            }
+                        }
+                        Field::Item => {
+                            if item.is_some() {
+                                return Err(serde::de::Error::duplicate_field("item"));
+                            } else {
+                                item = Some(map.next_value()?);
+                            }
+                        }
+                        Field::Item_Id => {
+                            if item_id.is_some() {
+                                return Err(serde::de::Error::duplicate_field("item_id"));
+                            } else {
+                                item_id = Some(
+                                    coerce_array(map.next_value::<Vec<u8>>()?.as_slice())
+                                        .map_err(serde::de::Error::custom)?,
+                                );
+                            }
+                        }
+                        Field::Dependency_Ids => {
+                            if dependency_ids.is_some() {
+                                return Err(serde::de::Error::duplicate_field("dependency_ids"));
+                            } else {
+                                dependency_ids = Some(
+                                    coerce_set(map.next_value()?)
+                                        .map_err(serde::de::Error::custom)?,
+                                );
+                            }
+                        }
+                    }
+                }
+                let id = id.ok_or_else(|| serde::de::Error::missing_field("id"))?;
+                let item = item.ok_or_else(|| serde::de::Error::missing_field("item"))?;
+                let item_id = item_id.ok_or_else(|| serde::de::Error::missing_field("item_id"))?;
+                let dependency_ids = dependency_ids
+                    .ok_or_else(|| serde::de::Error::missing_field("dependency_ids"))?;
+
+                Ok(Self::Value {
+                    id,
+                    item,
+                    item_id,
+                    dependency_ids,
+                    _phantom: PhantomData,
+                })
+            }
+        }
+
+        const FIELDS: &'static [&'static str] = &["id", "item", "item_id", "dependency_ids"];
+        deserializer.deserialize_struct("Node", FIELDS, NodeVisitor::<HW, HASH_LEN>(PhantomData))
+    }
+}
+
+impl<HW, const HASH_LEN: usize> Node<HW, HASH_LEN>
 where
-    N: ByteEncoder,
     HW: HashWriter<HASH_LEN>,
 {
     /// Construct a new node with a payload and a set of dependency_ids.
-    pub fn new(item: N, dependency_ids: BTreeSet<[u8; HASH_LEN]>) -> Self {
+    pub fn new<P: Into<Vec<u8>>>(item: P, dependency_ids: BTreeSet<[u8; HASH_LEN]>) -> Self {
         let mut hw = HW::default();
-
+        let item = item.into();
         // NOTE(jwall): The order here is important. Our reliable id creation must be stable
         // for multiple calls to this constructor. This means that we must *always*
         // 1. Record the `item_id` hash first.
-        hw.record(item.bytes().into_iter());
+        hw.record(item.iter().cloned());
         let item_id = hw.hash();
         // 2. Sort the dependency ids before recording them into our node id hash.
         let mut dependency_list = dependency_ids
@@ -75,7 +266,7 @@ where
         &self.id
     }
 
-    pub fn item(&self) -> &N {
+    pub fn item(&self) -> &[u8] {
         &self.item
     }
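With dependency_ids now round-tripping through the Serialize/Deserialize pair above, a Node should survive a CBOR encode/decode cycle. A minimal sketch, again assuming the merkle-dag crate name:

    use merkle_dag::node::Node;
    use std::collections::hash_map::DefaultHasher;
    use std::collections::BTreeSet;

    let node = Node::<DefaultHasher, 8>::new("hello", BTreeSet::new());
    let mut buf = Vec::new();
    ciborium::ser::into_writer(&node, &mut buf).unwrap();
    let decoded: Node<DefaultHasher, 8> =
        ciborium::de::from_reader(buf.as_slice()).unwrap();
    assert_eq!(node, decoded);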
diff --git a/src/proptest.rs b/src/proptest.rs
index 4598dca..add11c9 100644
--- a/src/proptest.rs
+++ b/src/proptest.rs
@@ -17,7 +17,7 @@
 use proptest::prelude::*;
 
 use crate::prelude::*;
 
-type TestDag = Merkle<BTreeMap<[u8; 8], Node<String, DefaultHasher, 8>>, String, DefaultHasher, 8>;
+type TestDag = Merkle<BTreeMap<[u8; 8], Node<DefaultHasher, 8>>, DefaultHasher, 8>;
 
 fn simple_edge_strategy(
     nodes_count: usize,
@@ -79,7 +79,7 @@ proptest! {
         let mut node_set = BTreeSet::new();
         for (idx, n) in nodes.iter().cloned().enumerate() {
             if !parent_idxs.contains(&idx) {
-                let node_id = dag.add_node(n, BTreeSet::new()).unwrap();
+                let node_id = dag.add_node(n.as_bytes(), BTreeSet::new()).unwrap();
                 node_set.insert(node_id.clone());
                 let parent = idx % parent_count;
                 if dependents.contains_key(&parent) {
@@ -114,7 +114,7 @@ proptest! {
                 continue;
             }
             for root in roots.iter() {
-                if let NodeCompare::After = dag.compare(root, node_id) {
+                if let NodeCompare::After = dag.compare(root, node_id).unwrap() {
                     // success
                     is_descendant = true;
                 }
@@ -125,7 +125,7 @@ proptest! {
         for left_root in roots.iter() {
             for right_root in roots.iter() {
                 if left_root != right_root {
-                    assert_eq!(dag.compare(left_root, right_root), NodeCompare::Uncomparable);
+                    assert_eq!(dag.compare(left_root, right_root).unwrap(), NodeCompare::Uncomparable);
                 }
             }
         }
diff --git a/src/store.rs b/src/store.rs
index 58c87a0..3aea7de 100644
--- a/src/store.rs
+++ b/src/store.rs
@@ -13,42 +13,38 @@
 // limitations under the License.
 use std::collections::BTreeMap;
 
-use crate::{
-    hash::{ByteEncoder, HashWriter},
-    node::Node,
-};
+use crate::{hash::HashWriter, node::Node};
+
+pub type Result<T> = std::result::Result<T, StoreError>;
 
 #[derive(Debug, Clone)]
 pub enum StoreError {
-    StoreFailure,
+    StoreFailure(String),
     NoSuchDependents,
 }
 
-pub trait Store<N, HW, const HASH_LEN: usize>: Default
+pub trait Store<HW, const HASH_LEN: usize>: Default
 where
-    N: ByteEncoder,
     HW: HashWriter<HASH_LEN>,
 {
-    fn contains(&self, id: &[u8; HASH_LEN]) -> bool;
-    fn get(&self, id: &[u8; HASH_LEN]) -> Option<&Node<N, HW, HASH_LEN>>;
-    fn store(&mut self, node: Node<N, HW, HASH_LEN>) -> Result<(), StoreError>;
+    fn contains(&self, id: &[u8; HASH_LEN]) -> Result<bool>;
+    fn get(&self, id: &[u8; HASH_LEN]) -> Result<Option<Node<HW, HASH_LEN>>>;
+    fn store(&mut self, node: Node<HW, HASH_LEN>) -> Result<()>;
 }
 
-impl<N, HW, const HASH_LEN: usize> Store<N, HW, HASH_LEN>
-    for BTreeMap<[u8; HASH_LEN], Node<N, HW, HASH_LEN>>
+impl<HW, const HASH_LEN: usize> Store<HW, HASH_LEN> for BTreeMap<[u8; HASH_LEN], Node<HW, HASH_LEN>>
 where
-    N: ByteEncoder,
     HW: HashWriter<HASH_LEN>,
 {
-    fn contains(&self, id: &[u8; HASH_LEN]) -> bool {
-        self.contains_key(id)
+    fn contains(&self, id: &[u8; HASH_LEN]) -> Result<bool> {
+        Ok(self.contains_key(id))
     }
 
-    fn get(&self, id: &[u8; HASH_LEN]) -> Option<&Node<N, HW, HASH_LEN>> {
-        self.get(id)
+    fn get(&self, id: &[u8; HASH_LEN]) -> Result<Option<Node<HW, HASH_LEN>>> {
+        Ok(self.get(id).cloned())
     }
 
-    fn store(&mut self, node: Node<N, HW, HASH_LEN>) -> Result<(), StoreError> {
+    fn store(&mut self, node: Node<HW, HASH_LEN>) -> Result<()> {
         self.insert(node.id().clone(), node);
         Ok(())
     }
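The BTreeMap impl doubles as the in-memory reference store. A sketch of driving the Store trait directly, assuming the crate prelude re-exports Node and Store as the tests below suggest:

    use merkle_dag::prelude::*; // assumed re-exports
    use std::collections::hash_map::DefaultHasher;
    use std::collections::BTreeMap;

    let mut store: BTreeMap<[u8; 8], Node<DefaultHasher, 8>> = BTreeMap::new();
    let node = Node::<DefaultHasher, 8>::new("payload", Default::default());
    store.store(node.clone()).unwrap();
    assert!(store.contains(node.id()).unwrap());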
diff --git a/src/test.rs b/src/test.rs
index fc853be..82b6e4b 100644
--- a/src/test.rs
+++ b/src/test.rs
@@ -17,8 +17,7 @@
 use std::collections::{BTreeMap, BTreeSet};
 
 use crate::prelude::*;
 
 type TestDag<'a> = Merkle<
-    BTreeMap<[u8; 8], Node<&'a str, std::collections::hash_map::DefaultHasher, 8>>,
-    &'a str,
+    BTreeMap<[u8; 8], Node<std::collections::hash_map::DefaultHasher, 8>>,
     std::collections::hash_map::DefaultHasher,
     8,
 >;
@@ -29,7 +28,7 @@ fn test_root_pointer_hygiene() {
     let quax_node_id = dag.add_node("quax", BTreeSet::new()).unwrap();
     assert_eq!(
         quax_node_id,
-        *dag.get_node_by_id(&quax_node_id).unwrap().id()
+        *dag.get_node_by_id(&quax_node_id).unwrap().unwrap().id()
     );
     assert!(dag.get_roots().contains(&quax_node_id));
     let mut dep_set = BTreeSet::new();
@@ -39,13 +38,14 @@ fn test_root_pointer_hygiene() {
     assert!(dag.get_roots().contains(&quux_node_id));
     assert_eq!(
         quux_node_id,
-        *dag.get_node_by_id(&quux_node_id).unwrap().id()
+        *dag.get_node_by_id(&quux_node_id).unwrap().unwrap().id()
     );
 }
 
 #[test]
 fn test_insert_no_such_dependents_error() {
-    let missing_dependent = Node::<&str, DefaultHasher, 8>::new("missing", BTreeSet::new());
+    let missing_dependent =
+        Node::<DefaultHasher, 8>::new("missing".as_bytes().to_vec(), BTreeSet::new());
     let mut dag = TestDag::new();
     let mut dep_set = BTreeSet::new();
     dep_set.insert(*missing_dependent.id());
@@ -60,7 +60,7 @@ fn test_adding_nodes_is_idempotent() {
     let quax_node_id = dag.add_node("quax", BTreeSet::new()).unwrap();
     assert_eq!(
         quax_node_id,
-        *dag.get_node_by_id(&quax_node_id).unwrap().id()
+        *dag.get_node_by_id(&quax_node_id).unwrap().unwrap().id()
     );
     assert!(dag.get_roots().contains(&quax_node_id));
     let root_size = dag.get_roots().len();
@@ -97,7 +97,7 @@ fn test_node_comparison_equivalent() {
     let mut dag = TestDag::new();
     let quake_node_id = dag.add_node("quake", BTreeSet::new()).unwrap();
     assert_eq!(
-        dag.compare(&quake_node_id, &quake_node_id),
+        dag.compare(&quake_node_id, &quake_node_id).unwrap(),
         NodeCompare::Equivalent
     );
 }
@@ -113,11 +113,11 @@ fn test_node_comparison_before() {
         .add_node("quell", BTreeSet::from([qualm_node_id.clone()]))
         .unwrap();
     assert_eq!(
-        dag.compare(&quake_node_id, &qualm_node_id),
+        dag.compare(&quake_node_id, &qualm_node_id).unwrap(),
         NodeCompare::Before
     );
     assert_eq!(
-        dag.compare(&quake_node_id, &quell_node_id),
+        dag.compare(&quake_node_id, &quell_node_id).unwrap(),
         NodeCompare::Before
     );
 }
@@ -133,11 +133,11 @@ fn test_node_comparison_after() {
         .add_node("quell", BTreeSet::from([qualm_node_id.clone()]))
         .unwrap();
     assert_eq!(
-        dag.compare(&qualm_node_id, &quake_node_id),
+        dag.compare(&qualm_node_id, &quake_node_id).unwrap(),
         NodeCompare::After
     );
     assert_eq!(
-        dag.compare(&quell_node_id, &quake_node_id),
+        dag.compare(&quell_node_id, &quake_node_id).unwrap(),
         NodeCompare::After
     );
 }
@@ -149,15 +149,15 @@ fn test_node_comparison_no_shared_graph() {
     let qualm_node_id = dag.add_node("qualm", BTreeSet::new()).unwrap();
     let quell_node_id = dag.add_node("quell", BTreeSet::new()).unwrap();
     assert_eq!(
-        dag.compare(&qualm_node_id, &quake_node_id),
+        dag.compare(&qualm_node_id, &quake_node_id).unwrap(),
         NodeCompare::Uncomparable
     );
     assert_eq!(
-        dag.compare(&quell_node_id, &quake_node_id),
+        dag.compare(&quell_node_id, &quake_node_id).unwrap(),
         NodeCompare::Uncomparable
     );
     assert_eq!(
-        dag.compare(&quell_node_id, &qualm_node_id),
+        dag.compare(&quell_node_id, &qualm_node_id).unwrap(),
         NodeCompare::Uncomparable
     );
 }
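Taken together, the pieces compose like this. A minimal sketch, assuming the merkle-dag crate name and a build with the rusty-leveldb feature enabled:

    use merkle_dag::blake2::Blake2b512;
    use merkle_dag::dag::{Merkle, NodeCompare};
    use merkle_dag::leveldb::LevelStore;
    use std::collections::BTreeSet;

    // Merkle::new() builds its store via Default; for LevelStore that is an
    // in-memory LevelDB, while LevelStore::open() backs onto a path on disk.
    let mut dag = Merkle::<LevelStore, Blake2b512, 64>::new();
    let a = dag.add_node("a", BTreeSet::new()).unwrap();
    let b = dag.add_node("b", BTreeSet::from([a])).unwrap();
    assert_eq!(dag.compare(&a, &b).unwrap(), NodeCompare::Before);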