LevelDB store implementation

Jeremy Wall 2022-08-06 20:16:08 -04:00
parent 06e0dc270c
commit 4ba7ee816b
10 changed files with 401 additions and 102 deletions


@ -7,11 +7,28 @@ license = "Apache License 2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
[dependencies.ciborium]
version = "0.2.0"
optional = true
[dependencies.serde]
version = "1.0.144"
features = ["derive"]
[dependencies.proptest]
version = "1.0.0"
optional = true
[dependencies.rusty-leveldb]
version = "= 1.0.4"
optional = true
[dependencies.blake2]
version = "0.10.4"
optional = true
[features]
default = []
cbor = ["dep:ciborium"]
blake2 = ["dep:blake2"]
rusty-leveldb = ["dep:rusty-leveldb", "blake2", "cbor"]
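Since the LevelDB store is strictly opt-in, a downstream crate would enable it through Cargo features; a hypothetical dependency entry (the crate name and version here are illustrative, not taken from this commit):

    # Hypothetical consumer Cargo.toml; the crate name is a placeholder.
    [dependencies.merkle-dag]
    version = "*"
    # Enabling `rusty-leveldb` pulls in `blake2` and `cbor` as well, per the
    # feature graph declared above.
    features = ["rusty-leveldb"]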

src/blake2.rs (new file, 39 lines added)

@ -0,0 +1,39 @@
// Copyright 2022 Jeremy Wall (Jeremy@marzhilsltudios.com)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::hash::*;
use blake2::digest::Digest;
pub use blake2::{Blake2b512, Blake2s256};
macro_rules! hash_writer_impl {
($tname:ident, $size:expr) => {
impl HashWriter<$size> for $tname {
fn record<I: Iterator<Item = u8>>(&mut self, bs: I) {
let vec: Vec<u8> = bs.collect();
self.update(&vec);
}
fn hash(&self) -> [u8; $size] {
let mut out: [u8; $size] = Default::default();
// This is gross, but Blake2 doesn't support a
// non-consuming version of finalize.
let mut arr = self.clone().finalize();
arr.swap_with_slice(&mut out);
out
}
}
};
}
// Blake2b512 emits 64-byte digests and Blake2s256 emits 32-byte digests;
// the HashWriter length must match or `swap_with_slice` panics at runtime.
hash_writer_impl!(Blake2b512, 64);
hash_writer_impl!(Blake2s256, 32);
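A minimal sketch of the generated impls in use through the `HashWriter` trait (Blake2s256 emits 32-byte digests, so it pairs with `HashWriter<32>`):

    // Sketch: derive a fixed-size id from payload bytes via the HashWriter trait.
    use crate::blake2::Blake2s256;
    use crate::hash::HashWriter;

    fn blake2s_id(payload: &[u8]) -> [u8; 32] {
        let mut hw = Blake2s256::default();
        hw.record(payload.iter().cloned());
        hw.hash()
    }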


@ -15,9 +15,9 @@
use std::{collections::BTreeSet, marker::PhantomData};
use crate::{
hash::{ByteEncoder, HashWriter},
hash::HashWriter,
node::Node,
store::{Store, StoreError},
store::{Result, Store, StoreError},
};
/// Node comparison values. In a given Merkle DAG a Node can sort `After` or `Before` another Node, be `Equivalent` to it, or be `Uncomparable`.
@ -44,22 +44,20 @@ pub enum NodeCompare {
/// A merkle DAG instance is tied to a specific implementation of the HashWriter interface to ensure
/// that all hash identifiers are of the same hash algorithm.
#[derive(Clone, Debug)]
pub struct Merkle<S, N, HW, const HASH_LEN: usize>
pub struct Merkle<S, HW, const HASH_LEN: usize>
where
N: ByteEncoder,
HW: HashWriter<HASH_LEN>,
S: Store<N, HW, HASH_LEN>,
S: Store<HW, HASH_LEN>,
{
roots: BTreeSet<[u8; HASH_LEN]>,
nodes: S,
_phantom_node: PhantomData<Node<N, HW, HASH_LEN>>,
_phantom_node: PhantomData<Node<HW, HASH_LEN>>,
}
impl<S, N, HW, const HASH_LEN: usize> Merkle<S, N, HW, HASH_LEN>
impl<S, HW, const HASH_LEN: usize> Merkle<S, HW, HASH_LEN>
where
N: ByteEncoder,
HW: HashWriter<HASH_LEN>,
S: Store<N, HW, HASH_LEN>,
S: Store<HW, HASH_LEN>,
{
/// Construct a new empty DAG. The empty DAG is also the default for a DAG.
pub fn new() -> Self {
@ -72,20 +70,20 @@ where
///
/// One result of constructing/adding nodes in this way is that we ensure that we always satisfy
/// the implementation rule in the merkle-crdt whitepaper.
pub fn add_node<'a>(
pub fn add_node<'a, N: Into<Vec<u8>>>(
&'a mut self,
item: N,
dependency_ids: BTreeSet<[u8; HASH_LEN]>,
) -> Result<[u8; HASH_LEN], StoreError> {
let node = Node::<N, HW, HASH_LEN>::new(item, dependency_ids.clone());
) -> Result<[u8; HASH_LEN]> {
let node = Node::<HW, HASH_LEN>::new(item.into(), dependency_ids.clone());
let id = node.id().clone();
if self.nodes.contains(&id) {
if self.nodes.contains(&id)? {
// We've already added this node so there is nothing left to do.
return Ok(id);
}
let mut root_removals = Vec::new();
for dep_id in dependency_ids.iter() {
if !self.nodes.contains(dep_id) {
if !self.nodes.contains(dep_id)? {
return Err(StoreError::NoSuchDependents);
}
// If any of our dependencies is in the roots pointer list then
@ -103,12 +101,12 @@ where
}
/// Check if we already have a copy of a node.
pub fn check_for_node(&self, id: &[u8; HASH_LEN]) -> bool {
pub fn check_for_node(&self, id: &[u8; HASH_LEN]) -> Result<bool> {
return self.nodes.contains(id);
}
/// Get a node from the DAG by its hash identifier if it exists.
pub fn get_node_by_id(&self, id: &[u8; HASH_LEN]) -> Option<&Node<N, HW, HASH_LEN>> {
pub fn get_node_by_id(&self, id: &[u8; HASH_LEN]) -> Result<Option<Node<HW, HASH_LEN>>> {
self.nodes.get(id)
}
@ -127,30 +125,30 @@ where
/// then returns `NodeCompare::After`. If both ids are equal then it returns
/// `NodeCompare::Equivalent`. If neither id is part of the same subgraph then it returns
/// `NodeCompare::Uncomparable`.
pub fn compare(&self, left: &[u8; HASH_LEN], right: &[u8; HASH_LEN]) -> NodeCompare {
if left == right {
pub fn compare(&self, left: &[u8; HASH_LEN], right: &[u8; HASH_LEN]) -> Result<NodeCompare> {
Ok(if left == right {
NodeCompare::Equivalent
} else {
// Is left node an ancestor of right node?
if self.search_graph(right, left) {
if self.search_graph(right, left)? {
NodeCompare::Before
// is right node an ancestor of left node?
} else if self.search_graph(left, right) {
} else if self.search_graph(left, right)? {
NodeCompare::After
} else {
NodeCompare::Uncomparable
}
}
})
}
fn search_graph(&self, root_id: &[u8; HASH_LEN], search_id: &[u8; HASH_LEN]) -> bool {
fn search_graph(&self, root_id: &[u8; HASH_LEN], search_id: &[u8; HASH_LEN]) -> Result<bool> {
if root_id == search_id {
return true;
return Ok(true);
}
let root_node = match self.get_node_by_id(root_id) {
let root_node = match self.get_node_by_id(root_id)? {
Some(n) => n,
None => {
return false;
return Ok(false);
}
};
let mut stack = vec![root_node];
@ -159,23 +157,22 @@ where
let deps = node.dependency_ids();
for dep in deps {
if search_id == dep {
return true;
return Ok(true);
}
stack.push(match self.get_node_by_id(dep) {
stack.push(match self.get_node_by_id(dep)? {
Some(n) => n,
None => panic!("Invalid DAG state encountered"),
})
}
}
return false;
return Ok(false);
}
}
impl<S, N, HW, const HASH_LEN: usize> Default for Merkle<S, N, HW, HASH_LEN>
impl<S, HW, const HASH_LEN: usize> Default for Merkle<S, HW, HASH_LEN>
where
N: ByteEncoder,
HW: HashWriter<HASH_LEN>,
S: Store<N, HW, HASH_LEN>,
S: Store<HW, HASH_LEN>,
{
fn default() -> Self {
Self {
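Taken together, the DAG API now takes raw byte payloads and threads store failures through `Result`. A short sketch of the new calling convention, using the same in-memory `BTreeMap` store and `DefaultHasher` the tests use:

    // Sketch: payloads are anything Into<Vec<u8>>; store-touching calls are fallible.
    use std::collections::{hash_map::DefaultHasher, BTreeMap, BTreeSet};
    use crate::dag::{Merkle, NodeCompare};
    use crate::node::Node;
    use crate::store::Result;

    type DemoDag = Merkle<BTreeMap<[u8; 8], Node<DefaultHasher, 8>>, DefaultHasher, 8>;

    fn demo() -> Result<()> {
        let mut dag = DemoDag::new();
        let a = dag.add_node("a", BTreeSet::new())?;
        let b = dag.add_node("b", BTreeSet::from([a]))?;
        // `a` is a dependency of `b`, so it compares as Before.
        assert_eq!(dag.compare(&a, &b)?, NodeCompare::Before);
        Ok(())
    }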


@ -11,15 +11,9 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
/// Utility Trait to specify that payloads must be serializable into bytes.
pub trait ByteEncoder {
/// Serialize self into bytes.
fn bytes(&self) -> Vec<u8>;
}
/// Utility Trait to specify the hashing algorithm and provide a common
/// interface for that algorithm to provide. This interface is expected to
/// be stateful.
@ -28,15 +22,10 @@ pub trait HashWriter<const LEN: usize>: Default {
fn record<I: Iterator<Item = u8>>(&mut self, bs: I);
/// Provide the current hash value based on the bytes that have so far been recorded.
/// It is expected that you can call this method multiple times while recording
/// the bytes for input into the hash.
fn hash(&self) -> [u8; LEN];
}
impl<H> HashWriter<8> for H
where
H: Hasher + Default,
{
impl HashWriter<8> for DefaultHasher {
fn record<I: Iterator<Item = u8>>(&mut self, iter: I) {
let bytes = iter.collect::<Vec<u8>>();
self.write(bytes.as_slice());
@ -46,12 +35,3 @@ where
self.finish().to_le_bytes()
}
}
impl<V> ByteEncoder for V
where
V: Into<Vec<u8>> + Clone,
{
fn bytes(&self) -> Vec<u8> {
<Self as Into<Vec<u8>>>::into(self.clone())
}
}
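The old blanket `impl<H: Hasher + Default> HashWriter<8> for H` presumably had to narrow to the concrete `DefaultHasher` so that `HashWriter` impls for foreign hash types cannot collide with it under coherence rules. A sketch of the surviving impl in use:

    // Sketch: 8-byte ids built from the std Hasher's u64 output.
    use std::collections::hash_map::DefaultHasher;
    use crate::hash::HashWriter;

    fn default_id(payload: &[u8]) -> [u8; 8] {
        let mut hw = DefaultHasher::default();
        hw.record(payload.iter().cloned());
        hw.hash()
    }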

src/leveldb/mod.rs (new file, 75 lines added)

@ -0,0 +1,75 @@
// Copyright 2022 Jeremy Wall (Jeremy@marzhilsltudios.com)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cell::RefCell;
use std::path::Path;
use crate::{
node::Node,
store::{Result, Store, StoreError},
};
use crate::blake2::*;
use ciborium;
use rusty_leveldb;
// TODO(jwall): Add leveldb backing store for a Merkle-DAG
pub struct LevelStore {
store: RefCell<rusty_leveldb::DB>,
}
impl LevelStore {
pub fn open<P: AsRef<Path>>(path: P) -> std::result::Result<Self, rusty_leveldb::Status> {
let opts = Default::default();
Ok(Self {
store: RefCell::new(rusty_leveldb::DB::open(path, opts)?),
})
}
}
impl Store<Blake2b512, 64> for LevelStore {
// Blake2b512 digests are 64 bytes, so ids here are [u8; 64] to match the
// HashWriter<64> impl in src/blake2.rs.
fn contains(&self, id: &[u8; 64]) -> Result<bool> {
Ok(self.store.borrow_mut().get(id).is_some())
}
fn get(&self, id: &[u8; 64]) -> Result<Option<Node<Blake2b512, 64>>> {
Ok(match self.store.borrow_mut().get(id) {
Some(bs) => Some(ciborium::de::from_reader(bs.as_slice()).map_err(|e| {
StoreError::StoreFailure(format!("Invalid serialization {:?}", e))
})?),
None => None,
})
}
fn store(&mut self, node: Node<Blake2b512, 64>) -> Result<()> {
let mut buf = Vec::new();
ciborium::ser::into_writer(&node, &mut buf)
.map_err(|e| StoreError::StoreFailure(format!("Serialization failure {:?}", e)))?;
self.store.borrow_mut().put(node.id(), &buf)?;
Ok(())
}
}
}
impl From<rusty_leveldb::Status> for StoreError {
fn from(status: rusty_leveldb::Status) -> Self {
StoreError::StoreFailure(format!("{}", status))
}
}
impl Default for LevelStore {
fn default() -> Self {
Self {
store: RefCell::new(
rusty_leveldb::DB::open("memory", rusty_leveldb::in_memory()).unwrap(),
),
}
}
}
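A sketch of the store in use: `Merkle::new()` builds its backing store via `Default`, which for `LevelStore` is the in-memory DB above. Persisting to disk would go through `LevelStore::open`, plus some way of handing a pre-built store to `Merkle` that this commit does not yet provide:

    // Sketch: an in-memory LevelStore backing a DAG keyed by Blake2b512
    // (64-byte ids, matching Blake2b512's digest size).
    use std::collections::BTreeSet;
    use crate::blake2::Blake2b512;
    use crate::dag::Merkle;
    use crate::leveldb::LevelStore;
    use crate::store::Result;

    fn demo() -> Result<()> {
        let mut dag: Merkle<LevelStore, Blake2b512, 64> = Merkle::new();
        let id = dag.add_node("hello leveldb", BTreeSet::new())?;
        assert!(dag.check_for_node(&id)?);
        Ok(())
    }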


@ -11,8 +11,12 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(feature = "blake2")]
pub mod blake2;
pub mod dag;
pub mod hash;
#[cfg(feature = "rusty-leveldb")]
pub mod leveldb;
pub mod node;
pub mod prelude;
pub mod store;


@ -13,7 +13,9 @@
// limitations under the License.
use std::{collections::BTreeSet, marker::PhantomData};
use crate::hash::{ByteEncoder, HashWriter};
use serde::{de::Visitor, ser::SerializeStruct, Deserialize, Deserializer, Serialize, Serializer};
use crate::hash::HashWriter;
/// A node in a merkle DAG. Nodes are composed of a payload item and a set of dependency_ids.
/// They provide a unique identifier that is formed from the bytes of the payload as well
@ -25,32 +27,221 @@ use crate::hash::{ByteEncoder, HashWriter};
/// Nodes are tied to a specific implementation of the HashWriter trait which is itself tied
/// to the DAG they are stored in guaranteeing that the same Hashing implementation is used
/// for each node in the DAG.
#[derive(Debug, PartialEq, Clone)]
pub struct Node<N, HW, const HASH_LEN: usize>
#[derive(Debug, PartialEq)]
pub struct Node<HW, const HASH_LEN: usize>
where
N: ByteEncoder,
HW: HashWriter<HASH_LEN>,
{
id: [u8; HASH_LEN],
item: N,
item: Vec<u8>,
item_id: [u8; HASH_LEN],
dependency_ids: BTreeSet<[u8; HASH_LEN]>,
_phantom: PhantomData<HW>,
}
impl<N, HW, const HASH_LEN: usize> Node<N, HW, HASH_LEN>
impl<HW, const HASH_LEN: usize> Clone for Node<HW, HASH_LEN>
where
HW: HashWriter<HASH_LEN>,
{
fn clone(&self) -> Self {
Self {
id: self.id.clone(),
item: self.item.clone(),
item_id: self.item_id.clone(),
dependency_ids: self.dependency_ids.clone(),
_phantom: PhantomData,
}
}
}
impl<HW, const HASH_LEN: usize> Serialize for Node<HW, HASH_LEN>
where
HW: HashWriter<HASH_LEN>,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut structor = serializer.serialize_struct("Node", 4)?;
structor.serialize_field("id", self.id.as_slice())?;
structor.serialize_field("item", &self.item)?;
structor.serialize_field("item_id", self.item_id.as_slice())?;
// Serialize the dependency ids as a sequence of byte slices so the
// deserializer below can rebuild the BTreeSet.
structor.serialize_field("dependency_ids", &self.dependency_ids.iter().map(|id| id.as_slice()).collect::<Vec<&[u8]>>())?;
structor.end()
}
}
fn coerce_array<const HASH_LEN: usize>(slice: &[u8]) -> Result<[u8; HASH_LEN], String> {
let mut coerced_item: [u8; HASH_LEN] = [0; HASH_LEN];
if slice.len() != coerced_item.len() {
return Err(format!(
"Expected slice of length: {} but got slice of length: {}",
coerced_item.len(),
slice.len()
));
} else {
coerced_item.copy_from_slice(slice);
}
Ok(coerced_item)
}
fn coerce_set<const HASH_LEN: usize>(
set: BTreeSet<&[u8]>,
) -> Result<BTreeSet<[u8; HASH_LEN]>, String> {
let mut coerced_item = BTreeSet::new();
for slice in set {
coerced_item.insert(coerce_array(slice)?);
}
Ok(coerced_item)
}
impl<'de, HW, const HASH_LEN: usize> Deserialize<'de> for Node<HW, HASH_LEN>
where
HW: HashWriter<HASH_LEN>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(field_identifier, rename_all = "lowercase")]
#[allow(non_camel_case_types)]
enum Field {
Id,
Item,
Item_Id,
Dependency_Ids,
}
struct NodeVisitor<HW, const HASH_LEN: usize>(PhantomData<HW>);
impl<'de, HW, const HASH_LEN: usize> Visitor<'de> for NodeVisitor<HW, HASH_LEN>
where
HW: HashWriter<HASH_LEN>,
{
type Value = Node<HW, HASH_LEN>;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("struct Node")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: serde::de::SeqAccess<'de>,
{
let id: [u8; HASH_LEN] = coerce_array(
seq.next_element::<&[u8]>()?
.ok_or_else(|| serde::de::Error::invalid_length(0, &self))?,
)
.map_err(serde::de::Error::custom)?;
let item = seq
.next_element::<Vec<u8>>()?
.ok_or_else(|| serde::de::Error::invalid_length(1, &self))?;
let item_id: [u8; HASH_LEN] = coerce_array(
seq.next_element::<&[u8]>()?
.ok_or_else(|| serde::de::Error::invalid_length(2, &self))?,
)
.map_err(serde::de::Error::custom)?;
let dependency_ids: BTreeSet<[u8; HASH_LEN]> = coerce_set(
seq.next_element::<BTreeSet<&[u8]>>()?
.ok_or_else(|| serde::de::Error::invalid_length(3, &self))?,
)
.map_err(serde::de::Error::custom)?;
Ok(Self::Value {
id,
item,
item_id,
dependency_ids,
_phantom: PhantomData,
})
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: serde::de::MapAccess<'de>,
{
let mut id: Option<[u8; HASH_LEN]> = None;
let mut item: Option<Vec<u8>> = None;
let mut item_id: Option<[u8; HASH_LEN]> = None;
let mut dependency_ids: Option<BTreeSet<[u8; HASH_LEN]>> = None;
while let Some(key) = map.next_key()? {
match key {
Field::Id => {
if id.is_some() {
return Err(serde::de::Error::duplicate_field("id"));
} else {
id = Some(
coerce_array(map.next_value()?)
.map_err(serde::de::Error::custom)?,
);
}
}
Field::Item => {
if item.is_some() {
return Err(serde::de::Error::duplicate_field("item"));
} else {
item = Some(map.next_value()?);
}
}
Field::Item_Id => {
if item_id.is_some() {
return Err(serde::de::Error::duplicate_field("item_id"));
} else {
item_id = Some(
coerce_array(map.next_value()?)
.map_err(serde::de::Error::custom)?,
);
}
}
Field::Dependency_Ids => {
if dependency_ids.is_some() {
return Err(serde::de::Error::duplicate_field("dependency_ids"));
} else {
dependency_ids = Some(
coerce_set(map.next_value()?)
.map_err(serde::de::Error::custom)?,
);
}
}
}
}
let id = id.ok_or_else(|| serde::de::Error::missing_field("id"))?;
let item = item.ok_or_else(|| serde::de::Error::missing_field("item"))?;
let item_id = item_id.ok_or_else(|| serde::de::Error::missing_field("item_id"))?;
let dependency_ids = dependency_ids
.ok_or_else(|| serde::de::Error::missing_field("dependency_ids"))?;
Ok(Self::Value {
id,
item,
item_id,
dependency_ids,
_phantom: PhantomData,
})
}
}
const FIELDS: &'static [&'static str] = &["id", "item", "item_id", "dependency_ids"];
deserializer.deserialize_struct(
"Duration",
FIELDS,
NodeVisitor::<HW, HASH_LEN>(PhantomData),
)
}
}
impl<HW, const HASH_LEN: usize> Node<HW, HASH_LEN>
where
N: ByteEncoder,
HW: HashWriter<HASH_LEN>,
{
/// Construct a new node with a payload and a set of dependency_ids.
pub fn new(item: N, dependency_ids: BTreeSet<[u8; HASH_LEN]>) -> Self {
pub fn new<P: Into<Vec<u8>>>(item: P, dependency_ids: BTreeSet<[u8; HASH_LEN]>) -> Self {
let mut hw = HW::default();
let item = item.into();
// NOTE(jwall): The order here is important. Our reliable id creation must be stable
// for multiple calls to this constructor. This means that we must *always*
// 1. Record the `item_id` hash first.
hw.record(item.bytes().into_iter());
hw.record(item.iter().cloned());
let item_id = hw.hash();
// 2. Sort the dependency ids before recording them into our node id hash.
let mut dependency_list = dependency_ids
@ -75,7 +266,7 @@ where
&self.id
}
pub fn item(&self) -> &N {
pub fn item(&self) -> &[u8] {
&self.item
}
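The ordering note in the constructor is what makes ids reproducible: the same payload bytes and dependency set always hash to the same id. A small sketch:

    // Sketch: a node id is a pure function of the payload bytes plus the
    // sorted dependency ids, so reconstructing a node reproduces its id.
    use std::collections::BTreeSet;
    use crate::blake2::Blake2b512;
    use crate::node::Node;

    fn id_is_stable() {
        let a = Node::<Blake2b512, 64>::new("payload", BTreeSet::new());
        let b = Node::<Blake2b512, 64>::new("payload", BTreeSet::new());
        assert_eq!(a.id(), b.id());
        assert_eq!(a.item(), "payload".as_bytes());
    }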


@ -17,7 +17,7 @@ use proptest::prelude::*;
use crate::prelude::*;
type TestDag = Merkle<BTreeMap<[u8; 8], Node<String, DefaultHasher, 8>>, String, DefaultHasher, 8>;
type TestDag = Merkle<BTreeMap<[u8; 8], Node<DefaultHasher, 8>>, DefaultHasher, 8>;
fn simple_edge_strategy(
nodes_count: usize,
@ -79,7 +79,7 @@ proptest! {
let mut node_set = BTreeSet::new();
for (idx, n) in nodes.iter().cloned().enumerate() {
if !parent_idxs.contains(&idx) {
let node_id = dag.add_node(n, BTreeSet::new()).unwrap();
let node_id = dag.add_node(n.as_bytes(), BTreeSet::new()).unwrap();
node_set.insert(node_id.clone());
let parent = idx % parent_count;
if dependents.contains_key(&parent) {
@ -114,7 +114,7 @@ proptest! {
continue;
}
for root in roots.iter() {
if let NodeCompare::After = dag.compare(root, node_id) {
if let NodeCompare::After = dag.compare(root, node_id).unwrap() {
// success
is_descendant = true;
}
@ -125,7 +125,7 @@ proptest! {
for left_root in roots.iter() {
for right_root in roots.iter() {
if left_root != right_root {
assert_eq!(dag.compare(left_root, right_root), NodeCompare::Uncomparable);
assert_eq!(dag.compare(left_root, right_root).unwrap(), NodeCompare::Uncomparable);
}
}
}


@ -13,42 +13,38 @@
// limitations under the License.
use std::collections::BTreeMap;
use crate::{
hash::{ByteEncoder, HashWriter},
node::Node,
};
use crate::{hash::HashWriter, node::Node};
pub type Result<T> = std::result::Result<T, StoreError>;
#[derive(Debug, Clone)]
pub enum StoreError {
StoreFailure,
StoreFailure(String),
NoSuchDependents,
}
pub trait Store<N, HW, const HASH_LEN: usize>: Default
pub trait Store<HW, const HASH_LEN: usize>: Default
where
N: ByteEncoder,
HW: HashWriter<HASH_LEN>,
{
fn contains(&self, id: &[u8; HASH_LEN]) -> bool;
fn get(&self, id: &[u8; HASH_LEN]) -> Option<&Node<N, HW, HASH_LEN>>;
fn store(&mut self, node: Node<N, HW, HASH_LEN>) -> Result<(), StoreError>;
fn contains(&self, id: &[u8; HASH_LEN]) -> Result<bool>;
fn get(&self, id: &[u8; HASH_LEN]) -> Result<Option<Node<HW, HASH_LEN>>>;
fn store(&mut self, node: Node<HW, HASH_LEN>) -> Result<()>;
}
impl<N, HW, const HASH_LEN: usize> Store<N, HW, HASH_LEN>
for BTreeMap<[u8; HASH_LEN], Node<N, HW, HASH_LEN>>
impl<HW, const HASH_LEN: usize> Store<HW, HASH_LEN> for BTreeMap<[u8; HASH_LEN], Node<HW, HASH_LEN>>
where
N: ByteEncoder,
HW: HashWriter<HASH_LEN>,
{
fn contains(&self, id: &[u8; HASH_LEN]) -> bool {
self.contains_key(id)
fn contains(&self, id: &[u8; HASH_LEN]) -> Result<bool> {
Ok(self.contains_key(id))
}
fn get(&self, id: &[u8; HASH_LEN]) -> Option<&Node<N, HW, HASH_LEN>> {
self.get(id)
fn get(&self, id: &[u8; HASH_LEN]) -> Result<Option<Node<HW, HASH_LEN>>> {
Ok(self.get(id).cloned())
}
fn store(&mut self, node: Node<N, HW, HASH_LEN>) -> Result<(), StoreError> {
fn store(&mut self, node: Node<HW, HASH_LEN>) -> Result<()> {
self.insert(node.id().clone(), node);
Ok(())
}
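The trait can also be driven directly, with no DAG on top; a minimal sketch against the `BTreeMap` impl:

    // Sketch: exercising the Store trait methods on the BTreeMap impl.
    use std::collections::{hash_map::DefaultHasher, BTreeMap, BTreeSet};
    use crate::node::Node;
    use crate::store::{Result, Store};

    fn demo() -> Result<()> {
        let mut store: BTreeMap<[u8; 8], Node<DefaultHasher, 8>> = BTreeMap::new();
        let node = Node::new("payload", BTreeSet::new());
        let id = node.id().clone();
        store.store(node)?;
        assert!(store.contains(&id)?);
        Ok(())
    }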


@ -17,8 +17,7 @@ use std::collections::{BTreeMap, BTreeSet};
use crate::prelude::*;
type TestDag<'a> = Merkle<
BTreeMap<[u8; 8], Node<&'a str, std::collections::hash_map::DefaultHasher, 8>>,
&'a str,
BTreeMap<[u8; 8], Node<std::collections::hash_map::DefaultHasher, 8>>,
std::collections::hash_map::DefaultHasher,
8,
>;
@ -29,7 +28,7 @@ fn test_root_pointer_hygiene() {
let quax_node_id = dag.add_node("quax", BTreeSet::new()).unwrap();
assert_eq!(
quax_node_id,
*dag.get_node_by_id(&quax_node_id).unwrap().id()
*dag.get_node_by_id(&quax_node_id).unwrap().unwrap().id()
);
assert!(dag.get_roots().contains(&quax_node_id));
let mut dep_set = BTreeSet::new();
@ -39,13 +38,14 @@ fn test_root_pointer_hygiene() {
assert!(dag.get_roots().contains(&quux_node_id));
assert_eq!(
quux_node_id,
*dag.get_node_by_id(&quux_node_id).unwrap().id()
*dag.get_node_by_id(&quux_node_id).unwrap().unwrap().id()
);
}
#[test]
fn test_insert_no_such_dependents_error() {
let missing_dependent = Node::<&str, DefaultHasher, 8>::new("missing", BTreeSet::new());
let missing_dependent =
Node::<DefaultHasher, 8>::new("missing".as_bytes().to_vec(), BTreeSet::new());
let mut dag = TestDag::new();
let mut dep_set = BTreeSet::new();
dep_set.insert(*missing_dependent.id());
@ -60,7 +60,7 @@ fn test_adding_nodes_is_idempotent() {
let quax_node_id = dag.add_node("quax", BTreeSet::new()).unwrap();
assert_eq!(
quax_node_id,
*dag.get_node_by_id(&quax_node_id).unwrap().id()
*dag.get_node_by_id(&quax_node_id).unwrap().unwrap().id()
);
assert!(dag.get_roots().contains(&quax_node_id));
let root_size = dag.get_roots().len();
@ -97,7 +97,7 @@ fn test_node_comparison_equivalent() {
let mut dag = TestDag::new();
let quake_node_id = dag.add_node("quake", BTreeSet::new()).unwrap();
assert_eq!(
dag.compare(&quake_node_id, &quake_node_id),
dag.compare(&quake_node_id, &quake_node_id).unwrap(),
NodeCompare::Equivalent
);
}
@ -113,11 +113,11 @@ fn test_node_comparison_before() {
.add_node("quell", BTreeSet::from([qualm_node_id.clone()]))
.unwrap();
assert_eq!(
dag.compare(&quake_node_id, &qualm_node_id),
dag.compare(&quake_node_id, &qualm_node_id).unwrap(),
NodeCompare::Before
);
assert_eq!(
dag.compare(&quake_node_id, &quell_node_id),
dag.compare(&quake_node_id, &quell_node_id).unwrap(),
NodeCompare::Before
);
}
@ -133,11 +133,11 @@ fn test_node_comparison_after() {
.add_node("quell", BTreeSet::from([qualm_node_id.clone()]))
.unwrap();
assert_eq!(
dag.compare(&qualm_node_id, &quake_node_id),
dag.compare(&qualm_node_id, &quake_node_id).unwrap(),
NodeCompare::After
);
assert_eq!(
dag.compare(&quell_node_id, &quake_node_id),
dag.compare(&quell_node_id, &quake_node_id).unwrap(),
NodeCompare::After
);
}
@ -149,15 +149,15 @@ fn test_node_comparison_no_shared_graph() {
let qualm_node_id = dag.add_node("qualm", BTreeSet::new()).unwrap();
let quell_node_id = dag.add_node("quell", BTreeSet::new()).unwrap();
assert_eq!(
dag.compare(&qualm_node_id, &quake_node_id),
dag.compare(&qualm_node_id, &quake_node_id).unwrap(),
NodeCompare::Uncomparable
);
assert_eq!(
dag.compare(&quell_node_id, &quake_node_id),
dag.compare(&quell_node_id, &quake_node_id).unwrap(),
NodeCompare::Uncomparable
);
assert_eq!(
dag.compare(&quell_node_id, &qualm_node_id),
dag.compare(&quell_node_id, &qualm_node_id).unwrap(),
NodeCompare::Uncomparable
);
}