refactor(deps): Allow duplicate keys in ZipTree

This commit is contained in:
Julius Pfrommer 2023-01-03 17:21:31 +01:00 committed by Julius Pfrommer
parent c005953c14
commit 77c240b6d3
7 changed files with 498 additions and 203 deletions

383
deps/ziptree.c vendored
View File

@ -2,151 +2,179 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
* Copyright 2021 (c) Julius Pfrommer
* Copyright 2021-2022 (c) Julius Pfrommer
*/
#include "ziptree.h"
#if defined(_MSC_VER)
#include <intrin.h>
#endif
/* Dummy types */
struct zip_elem;
typedef struct zip_elem zip_elem;
typedef ZIP_ENTRY(zip_elem) zip_entry;
typedef ZIP_HEAD(, zip_elem) zip_head;
unsigned char
__ZIP_FFS32(unsigned int v) {
#if defined(__GNUC__) || defined(__clang__)
return (unsigned char)((unsigned char)__builtin_ffs((int)v) - 1u);
#elif defined(_MSC_VER)
unsigned long index = 255;
_BitScanForward(&index, v);
return (unsigned char)index;
#else
if(v == 0)
return 255;
unsigned int t = 1;
unsigned char r = 0;
while((v & t) == 0) {
t = t << 1;
r++;
}
return r;
#endif
/* Access macros */
#define ZIP_ENTRY_PTR(x) ((zip_entry*)((char*)x + fieldoffset))
#define ZIP_KEY_PTR(x) (const void*)((const char*)x + keyoffset)
/* Hash pointers to keep the tie-breaking of equal keys (mostly) uncorrelated
* from the rank (pointer order). Hashing code taken from sdbm-hash
* (http://www.cse.yorku.ca/~oz/hash.html). */
static unsigned int
__ZIP_PTR_HASH(const void *p) {
    /* Fold the bytes of the pointer value itself into an sdbm hash */
    const unsigned char *bytes = (const unsigned char*)&p;
    unsigned int hash = 0;
    size_t i = 0;
    while(i < sizeof(void*)) {
        hash = bytes[i] + (hash << 6) + (hash << 16) - hash;
        i++;
    }
    return hash;
}
/* Generic analog to ZIP_ENTRY(type) */
struct zip_entry {
void *left;  /* Left child: elements ordered before this one */
void *right; /* Right child: elements ordered after this one */
/* Rank drawn from a geometric distribution (probability 1/2^{k+1} for
 * rank k, see __ZIP_FFS32); the tree is heap-ordered on the rank */
unsigned char rank;
};
/* Compare the pseudo-random "rank" of two elements, derived by hashing their
 * addresses. Never returns ZIP_CMP_EQ: a hash collision falls back to the raw
 * pointer order as the tie-breaker. NOTE(review): ordered comparison of
 * pointers into different objects is unspecified in ISO C; this relies on a
 * flat address space -- confirm acceptable for all targets. */
static ZIP_INLINE enum ZIP_CMP
__ZIP_RANK_CMP(const void *p1, const void *p2) {
/* assert(p1 != p2); */
unsigned int h1 = __ZIP_PTR_HASH(p1);
unsigned int h2 = __ZIP_PTR_HASH(p2);
if(h1 == h2)
return (p1 < p2) ? ZIP_CMP_LESS : ZIP_CMP_MORE;
return (h1 < h2) ? ZIP_CMP_LESS : ZIP_CMP_MORE;
}
#define ZIP_ENTRY_PTR(x) (struct zip_entry*)((char*)x + fieldoffset)
#define ZIP_KEY_PTR(x) (void*)((char*)x + keyoffset)
/* Compare two elements by their keys (via the user-supplied cmp callback).
 * Equal keys are disambiguated by the element addresses, so that every
 * element occupies a unique position in the total order. Returns ZIP_CMP_EQ
 * only for the identical element. */
static ZIP_INLINE enum ZIP_CMP
__ZIP_KEY_CMP(zip_cmp_cb cmp, unsigned short keyoffset,
const void *p1, const void *p2) {
if(p1 == p2)
return ZIP_CMP_EQ;
enum ZIP_CMP order = cmp(ZIP_KEY_PTR(p1), ZIP_KEY_PTR(p2));
if(order == ZIP_CMP_EQ)
/* Same key, different element: break the tie on the pointer value */
return (p1 < p2) ? ZIP_CMP_LESS : ZIP_CMP_MORE;
return order;
}
void *
__ZIP_INSERT(zip_cmp_cb cmp, unsigned short fieldoffset,
unsigned short keyoffset, void *root, void *elm) {
struct zip_entry *elm_entry = ZIP_ENTRY_PTR(elm);
if(!root) {
elm_entry->left = NULL;
elm_entry->right = NULL;
return elm;
void
__ZIP_INSERT(void *h, zip_cmp_cb cmp, unsigned short fieldoffset,
unsigned short keyoffset, void *elm) {
zip_elem *x = (zip_elem*)elm;
ZIP_ENTRY_PTR(x)->left = NULL;
ZIP_ENTRY_PTR(x)->right = NULL;
zip_head *head = (zip_head*)h;
if(!head->root) {
head->root = x;
return;
}
struct zip_entry *root_entry = ZIP_ENTRY_PTR(root);
enum ZIP_CMP order = cmp(ZIP_KEY_PTR(elm), ZIP_KEY_PTR(root));
if(order == ZIP_CMP_LESS) {
if(__ZIP_INSERT(cmp, fieldoffset, keyoffset, root_entry->left, elm) == elm) {
if(elm_entry->rank < root_entry->rank) {
root_entry->left = elm;
} else {
root_entry->left = elm_entry->right;
elm_entry->right = root;
return elm;
}
}
zip_elem *prev = NULL;
zip_elem *cur = head->root;
enum ZIP_CMP cur_order;
enum ZIP_CMP prev_order;
do {
if(x == cur)
return;
cur_order = __ZIP_KEY_CMP(cmp, keyoffset, x, cur);
if(__ZIP_RANK_CMP(x, cur) == ZIP_CMP_MORE)
break;
prev = cur;
prev_order = cur_order;
cur = (cur_order == ZIP_CMP_MORE) ?
ZIP_ENTRY_PTR(cur)->right : ZIP_ENTRY_PTR(cur)->left;
} while(cur);
if(cur == head->root) {
head->root = x;
} else {
if(__ZIP_INSERT(cmp, fieldoffset, keyoffset, root_entry->right, elm) == elm) {
if(elm_entry->rank <= root_entry->rank) {
root_entry->right = elm;
} else {
root_entry->right = elm_entry->left;
elm_entry->left = root;
return elm;
}
}
if(prev_order == ZIP_CMP_MORE)
ZIP_ENTRY_PTR(prev)->right = x;
else
ZIP_ENTRY_PTR(prev)->left = x;
}
return root;
}
static void *
__ZIP(unsigned short fieldoffset, void *x, void *y) {
if(!x) return y;
if(!y) return x;
struct zip_entry *x_entry = ZIP_ENTRY_PTR(x);
struct zip_entry *y_entry = ZIP_ENTRY_PTR(y);
if(x_entry->rank < y_entry->rank) {
y_entry->left = __ZIP(fieldoffset, x, y_entry->left);
return y;
if(!cur)
return;
if(cur_order != ZIP_CMP_LESS) {
ZIP_ENTRY_PTR(x)->left = cur;
} else {
x_entry->right = __ZIP(fieldoffset, x_entry->right, y);
return x;
ZIP_ENTRY_PTR(x)->right = cur;
}
prev = x;
do {
zip_elem *fix = prev;
if(cur_order != ZIP_CMP_LESS) {
do {
prev = cur;
cur = ZIP_ENTRY_PTR(cur)->right;
if(!cur)
break;
cur_order = __ZIP_KEY_CMP(cmp, keyoffset, x, cur);
} while(cur_order == ZIP_CMP_MORE);
} else {
do {
prev = cur;
cur = ZIP_ENTRY_PTR(cur)->left;
if(!cur)
break;
cur_order = __ZIP_KEY_CMP(cmp, keyoffset, x, cur);
} while(cur_order == ZIP_CMP_LESS);
}
if((fix != x && __ZIP_KEY_CMP(cmp, keyoffset, x, fix) == ZIP_CMP_LESS) ||
(fix == x && __ZIP_KEY_CMP(cmp, keyoffset, x, prev) == ZIP_CMP_LESS))
ZIP_ENTRY_PTR(fix)->left = cur;
else
ZIP_ENTRY_PTR(fix)->right = cur;
} while(cur);
}
/* Modified from the original algorithm. Allow multiple elements with the same
* key. */
void *
__ZIP_REMOVE(zip_cmp_cb cmp, unsigned short fieldoffset,
unsigned short keyoffset, void *root, void *elm) {
struct zip_entry *root_entry = ZIP_ENTRY_PTR(root);
if(root == elm)
return __ZIP(fieldoffset, root_entry->left, root_entry->right);
void *left = root_entry->left;
void *right = root_entry->right;
enum ZIP_CMP eq = cmp(ZIP_KEY_PTR(elm), ZIP_KEY_PTR(root));
if(eq == ZIP_CMP_LESS) {
struct zip_entry *left_entry = ZIP_ENTRY_PTR(left);
if(elm == left)
root_entry->left = __ZIP(fieldoffset, left_entry->left, left_entry->right);
else if(left)
__ZIP_REMOVE(cmp, fieldoffset, keyoffset, left, elm);
} else if(eq == ZIP_CMP_MORE) {
struct zip_entry *right_entry = ZIP_ENTRY_PTR(right);
if(elm == right)
root_entry->right = __ZIP(fieldoffset, right_entry->left, right_entry->right);
else if(right)
__ZIP_REMOVE(cmp, fieldoffset, keyoffset, right, elm);
} else { /* ZIP_CMP_EQ, but root != elm */
if(right)
root_entry->right = __ZIP_REMOVE(cmp, fieldoffset, keyoffset, right, elm);
if(left)
root_entry->left = __ZIP_REMOVE(cmp, fieldoffset, keyoffset, left, elm);
void
__ZIP_REMOVE(void *h, zip_cmp_cb cmp, unsigned short fieldoffset,
unsigned short keyoffset, void *elm) {
zip_head *head = (zip_head*)h;
zip_elem *x = (zip_elem*)elm;
zip_elem *cur = head->root;
if(!cur)
return;
zip_elem **prev_edge;
enum ZIP_CMP cur_order = __ZIP_KEY_CMP(cmp, keyoffset, x, cur);
while(cur_order != ZIP_CMP_EQ) {
prev_edge = (cur_order == ZIP_CMP_LESS) ?
&ZIP_ENTRY_PTR(cur)->left : &ZIP_ENTRY_PTR(cur)->right;
cur = *prev_edge;
if(!cur)
return;
cur_order = __ZIP_KEY_CMP(cmp, keyoffset, x, cur);
}
return root;
cur = (zip_elem*)__ZIP_ZIP(fieldoffset,
ZIP_ENTRY_PTR(cur)->left,
ZIP_ENTRY_PTR(cur)->right);
if(head->root == x)
head->root = cur;
else
*prev_edge = cur;
}
void *
__ZIP_FIND(zip_cmp_cb cmp, unsigned short fieldoffset,
unsigned short keyoffset, void *root, const void *key) {
if(!root)
return NULL;
enum ZIP_CMP eq = cmp(key, ZIP_KEY_PTR(root));
if(eq == ZIP_CMP_EQ)
return root;
struct zip_entry *root_entry = ZIP_ENTRY_PTR(root);
if(eq == ZIP_CMP_LESS)
return __ZIP_FIND(cmp, fieldoffset, keyoffset, root_entry->left, key);
else
return __ZIP_FIND(cmp, fieldoffset, keyoffset, root_entry->right, key);
unsigned short keyoffset, void *cur, const void *key) {
while(cur) {
enum ZIP_CMP eq = cmp(key, ZIP_KEY_PTR(cur));
if(eq == ZIP_CMP_EQ)
return cur;
if(eq == ZIP_CMP_LESS)
cur = ZIP_ENTRY_PTR(cur)->left;
else
cur = ZIP_ENTRY_PTR(cur)->right;
}
return NULL;
}
void *
__ZIP_MIN(unsigned short fieldoffset, void *elm) {
if(!elm)
return NULL;
struct zip_entry *elm_entry = ZIP_ENTRY_PTR(elm);
while(elm_entry->left) {
elm = elm_entry->left;
elm_entry = (struct zip_entry*)((char*)elm + fieldoffset);
while(ZIP_ENTRY_PTR(elm)->left) {
elm = ZIP_ENTRY_PTR(elm)->left;
}
return elm;
}
@ -155,21 +183,120 @@ void *
__ZIP_MAX(unsigned short fieldoffset, void *elm) {
if(!elm)
return NULL;
struct zip_entry *elm_entry = ZIP_ENTRY_PTR(elm);
while(elm_entry->right) {
elm = elm_entry->right;
elm_entry = (struct zip_entry*)((char*)elm + fieldoffset);
while(ZIP_ENTRY_PTR(elm)->right) {
elm = ZIP_ENTRY_PTR(elm)->right;
}
return elm;
}
void
__ZIP_ITER(unsigned short fieldoffset, __zip_iter_cb cb,
void *
__ZIP_ITER(unsigned short fieldoffset, zip_iter_cb cb,
void *context, void *elm) {
if(!elm)
return;
struct zip_entry *elm_entry = ZIP_ENTRY_PTR(elm);
__ZIP_ITER(fieldoffset, cb, context, elm_entry->left);
__ZIP_ITER(fieldoffset, cb, context, elm_entry->right);
cb(elm, context);
return NULL;
zip_elem *left = ZIP_ENTRY_PTR(elm)->left;
zip_elem *right = ZIP_ENTRY_PTR(elm)->right;
void *res = __ZIP_ITER(fieldoffset, cb, context, left);
if(res)
return res;
res = cb(context, elm);
if(res)
return res;
return __ZIP_ITER(fieldoffset, cb, context, right);
}
/* Merge ("zip") two subtrees into one, assuming every element in "left" is
 * ordered before every element in "right". Walks the right spine of "left"
 * and the left spine of "right" simultaneously and links in whichever node
 * has the higher rank (per __ZIP_RANK_CMP), so the merged subtree keeps the
 * rank ordering. Returns the root of the merged subtree. */
void *
__ZIP_ZIP(unsigned short fieldoffset, void *left, void *right) {
if(!left)
return right;
if(!right)
return left;
zip_elem *l = (zip_elem*)left;
zip_elem *r = (zip_elem*)right;
zip_elem *root = NULL;
/* prev_edge points at the parent slot into which the next chosen node
 * gets linked; it starts at the (virtual) root slot */
zip_elem **prev_edge = &root;
while(l && r) {
if(__ZIP_RANK_CMP(l, r) == ZIP_CMP_LESS) {
/* r outranks l: link r and continue down r's left spine */
*prev_edge = r;
prev_edge = &ZIP_ENTRY_PTR(r)->left;
r = ZIP_ENTRY_PTR(r)->left;
} else {
/* l outranks r: link l and continue down l's right spine */
*prev_edge = l;
prev_edge = &ZIP_ENTRY_PTR(l)->right;
l = ZIP_ENTRY_PTR(l)->right;
}
}
/* At most one spine remains; attach it as-is */
*prev_edge = (l) ? l : r;
return root;
}
/* Split ("unzip") the tree in "head" at "key" into two output trees: "left"
 * receives the elements ordered up to the key, "right" the elements ordered
 * after it. Only the key comparison is used here (no pointer tie-breaking),
 * so -- NOTE(review) -- elements whose key equals "key" may end up in both
 * output trees; confirm this is the intended semantics for duplicates. */
void
__ZIP_UNZIP(zip_cmp_cb cmp, unsigned short fieldoffset,
unsigned short keyoffset, const void *key,
void *h, void *l, void *r) {
zip_elem *prev;
zip_head *head = (zip_head*)h;
zip_head *left = (zip_head*)l;
zip_head *right = (zip_head*)r;
/* Empty input -> both outputs empty */
if(!head->root) {
left->root = NULL;
right->root = NULL;
return;
}
zip_elem *cur = head->root;
enum ZIP_CMP head_order = cmp(key, ZIP_KEY_PTR(cur));
if(head_order == ZIP_CMP_EQ) {
/* The root matches the key: its right subtree becomes the right
 * output, the root with its left subtree the left output */
right->root = ZIP_ENTRY_PTR(cur)->right;
ZIP_ENTRY_PTR(cur)->right = NULL;
left->root = cur;
} else if(head_order == ZIP_CMP_MORE) {
/* key > root key: the root stays in the left tree. Descend the right
 * spine until the first element whose key exceeds the split key; that
 * element becomes the right tree's root. */
left->root = cur;
do {
prev = cur;
cur = ZIP_ENTRY_PTR(cur)->right;
if(!cur) {
right->root = NULL;
return;
}
} while(cmp(key, ZIP_KEY_PTR(cur)) != ZIP_CMP_LESS);
right->root = cur;
ZIP_ENTRY_PTR(prev)->right = NULL;
/* Elements with key <= split key can still sit on cur's left spine.
 * Unhook each one and append it at the rightmost position of the
 * left tree, preserving the order. */
zip_elem *left_rightmost = prev;
while(ZIP_ENTRY_PTR(cur)->left) {
prev = cur;
cur = ZIP_ENTRY_PTR(cur)->left;
if(cmp(key, ZIP_KEY_PTR(cur)) != ZIP_CMP_LESS) {
ZIP_ENTRY_PTR(prev)->left = ZIP_ENTRY_PTR(cur)->right;
ZIP_ENTRY_PTR(cur)->right = NULL;
ZIP_ENTRY_PTR(left_rightmost)->right = cur;
left_rightmost = cur;
/* Re-examine prev's (possibly new) left child next */
cur = prev;
}
}
} else {
/* key < root key: mirror image of the branch above -- the root stays
 * in the right tree and elements > key are moved across */
right->root = cur;
do {
prev = cur;
cur = ZIP_ENTRY_PTR(cur)->left;
if(!cur) {
left->root = NULL;
return;
}
} while(cmp(key, ZIP_KEY_PTR(cur)) == ZIP_CMP_LESS);
left->root = cur;
ZIP_ENTRY_PTR(prev)->left = NULL;
zip_elem *right_leftmost = prev;
while(ZIP_ENTRY_PTR(cur)->right) {
prev = cur;
cur = ZIP_ENTRY_PTR(cur)->right;
if(cmp(key, ZIP_KEY_PTR(cur)) == ZIP_CMP_LESS) {
ZIP_ENTRY_PTR(prev)->right = ZIP_ENTRY_PTR(cur)->left;
ZIP_ENTRY_PTR(cur)->left = NULL;
ZIP_ENTRY_PTR(right_leftmost)->left = cur;
right_leftmost = cur;
cur = prev;
}
}
}
}

137
deps/ziptree.h vendored
View File

@ -2,7 +2,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
* Copyright 2018, 2021 (c) Julius Pfrommer
* Copyright 2018, 2021-2022 (c) Julius Pfrommer
*/
#ifndef ZIPTREE_H_
@ -16,7 +16,6 @@
# define ZIP_INLINE inline
#endif
/* Prevent warnings on unused static inline functions for some compilers */
#if defined(__GNUC__) || defined(__clang__)
# define ZIP_UNUSED __attribute__((unused))
#else
@ -32,8 +31,12 @@ extern "C" {
*
* Zip trees were developed in: Tarjan, R. E., Levy, C. C., and Timmel, S. "Zip
* Trees." arXiv preprint arXiv:1806.06726 (2018). The original definition was
* modified so that several elements with the same key can be inserted. However,
* ZIP_FIND will only return the topmost of these elements in the tree.
* modified in two ways:
*
* - Multiple elements with the same key can be inserted. These appear adjacent
* in the tree. ZIP_FIND will return the topmost of these elements.
* - The pointer values of the elements are used as the rank. This simplifies the
* code and is (empirically) faster.
*
* The ZIP_ENTRY definitions are to be contained in the tree entries themselves.
* Use ZIP_FUNCTIONS to define the signature of the zip tree functions. */
@ -47,7 +50,6 @@ struct name { \
struct { \
struct type *left; \
struct type *right; \
unsigned char rank; \
}
enum ZIP_CMP {
@ -56,74 +58,51 @@ enum ZIP_CMP {
ZIP_CMP_MORE = 1
};
/* The comparison method "cmp" for a zip tree has the signature.
* Provide this to the ZIP_FUNCTIONS macro.
*
* enum ZIP_CMP cmpMethod(const keytype *a, const keytype *b);
*/
typedef enum ZIP_CMP (*zip_cmp_cb)(const void *key1, const void *key2);
#define ZIP_INIT(head) do { (head)->root = NULL; } while (0)
#define ZIP_ROOT(head) (head)->root
#define ZIP_LEFT(elm, field) (elm)->field.left
#define ZIP_RIGHT(elm, field) (elm)->field.right
#define ZIP_RANK(elm, field) (elm)->field.rank
/* Internal definitions. Don't use directly. */
typedef void (*__zip_iter_cb)(void *elm, void *context);
void *
__ZIP_INSERT(zip_cmp_cb cmp, unsigned short fieldoffset,
unsigned short keyoffset, void *root, void *elm);
void *
__ZIP_REMOVE(zip_cmp_cb cmp, unsigned short fieldoffset,
unsigned short keyoffset, void *root, void *elm);
void *
__ZIP_FIND(zip_cmp_cb cmp, unsigned short fieldoffset,
unsigned short keyoffset, void *root,
const void *key);
void
__ZIP_ITER(unsigned short fieldoffset, __zip_iter_cb cb,
void *context, void *elm);
void * __ZIP_MIN(unsigned short fieldoffset, void *elm);
void * __ZIP_MAX(unsigned short fieldoffset, void *elm);
/* Zip trees are a probabilistic data structure. Entries are assigned a
* (non-negative) rank k with probability 1/2^{k+1}. A uniformly sampled random
* number has to be supplied with the insert method. __ZIP_FFS32 extracts from
* it least significant nonzero bit of a 32bit number. This then has the correct
* distribution. */
unsigned char __ZIP_FFS32(unsigned int v);
/* Generate zip tree method definitions with the ZIP_FUNCTIONS macro. The
* comparison method "cmp" defined for every zip tree has the signature
/* ZIP_ITER uses in-order traversal of the tree (in the order of the keys). The
* memory of a node is not accessed by ZIP_ITER after the callback has been
* executed for it. So a tree can be cleaned by calling free on each node from
* within the iteration callback.
*
* enum ZIP_CMP cmpDateTime(const keytype *a, const keytype *b); */
* ZIP_ITER returns a void pointer. The first callback to return non-NULL aborts
* the iteration. This pointer is then returned. */
typedef void * (*zip_iter_cb)(void *context, void *elm);
#define ZIP_INSERT(name, head, elm, rank) name##_ZIP_INSERT(head, elm, rank)
/* Generate typed method definitions with the ZIP_FUNCTIONS macro */
#define ZIP_INSERT(name, head, elm) name##_ZIP_INSERT(head, elm)
#define ZIP_REMOVE(name, head, elm) name##_ZIP_REMOVE(head, elm)
#define ZIP_FIND(name, head, key) name##_ZIP_FIND(head, key)
#define ZIP_ITER(name, head, cb, d) name##_ZIP_ITER(head, cb, d)
#define ZIP_MIN(name, head) name##_ZIP_MIN(head)
#define ZIP_MAX(name, head) name##_ZIP_MAX(head)
#define ZIP_ITER(name, head, cb, d) name##_ZIP_ITER(head, cb, d)
#define ZIP_ZIP(name, left, right) name##_ZIP_ZIP(left, right)
#define ZIP_UNZIP(name, head, key, left, right) \
name##_ZIP_UNZIP(head, key, left, right)
#define ZIP_FUNCTIONS(name, type, field, keytype, keyfield, cmp) \
\
ZIP_UNUSED static ZIP_INLINE void \
name##_ZIP_INSERT(struct name *head, struct type *elm, \
unsigned int r) { \
ZIP_RANK(elm, field) = __ZIP_FFS32(r); \
ZIP_ROOT(head) = (struct type*) \
__ZIP_INSERT(cmp, offsetof(struct type, field), \
offsetof(struct type, keyfield), \
ZIP_ROOT(head), elm); \
name##_ZIP_INSERT(struct name *head, struct type *el) { \
__ZIP_INSERT(head, cmp, offsetof(struct type, field), \
offsetof(struct type, keyfield), el); \
} \
\
ZIP_UNUSED static ZIP_INLINE void \
name##_ZIP_REMOVE(struct name *head, struct type *elm) { \
ZIP_ROOT(head) = (struct type*) \
__ZIP_REMOVE(cmp, offsetof(struct type, field), \
offsetof(struct type, keyfield), \
ZIP_ROOT(head), elm); \
__ZIP_REMOVE(head, cmp, offsetof(struct type, field), \
offsetof(struct type, keyfield), elm); \
} \
\
ZIP_UNUSED static ZIP_INLINE struct type * \
@ -145,14 +124,58 @@ name##_ZIP_MAX(struct name *head) { \
ZIP_ROOT(head)); \
} \
\
typedef void (*name##_cb)(struct type *elm, void *context); \
typedef void * (*name##_cb)(void *context, struct type *elm); \
\
ZIP_UNUSED static ZIP_INLINE void * \
name##_ZIP_ITER(struct name *head, name##_cb cb, void *context) { \
return __ZIP_ITER(offsetof(struct type, field), (zip_iter_cb)cb, \
context, ZIP_ROOT(head)); \
} \
\
ZIP_UNUSED static ZIP_INLINE struct type * \
name##_ZIP_ZIP(struct type *left, struct type *right) { \
return (struct type*) \
__ZIP_ZIP(offsetof(struct type, field), left, right); \
} \
\
ZIP_UNUSED static ZIP_INLINE void \
name##_ZIP_ITER(struct name *head, name##_cb cb, void *context) { \
__ZIP_ITER(offsetof(struct type, field), (__zip_iter_cb)cb, \
context, ZIP_ROOT(head)); \
name##_ZIP_UNZIP(struct name *head, const keytype *key, \
struct name *left, struct name *right) { \
__ZIP_UNZIP(cmp, offsetof(struct type, field), \
offsetof(struct type, keyfield), key, \
head, left, right); \
}
/* Internal definitions. Don't use directly. */
void
__ZIP_INSERT(void *h, zip_cmp_cb cmp, unsigned short fieldoffset,
unsigned short keyoffset, void *elm);
void
__ZIP_REMOVE(void *h, zip_cmp_cb cmp, unsigned short fieldoffset,
unsigned short keyoffset, void *elm);
void *
__ZIP_FIND(zip_cmp_cb cmp, unsigned short fieldoffset,
unsigned short keyoffset, void *root,
const void *key);
void *
__ZIP_ITER(unsigned short fieldoffset, zip_iter_cb cb,
void *context, void *elm);
void * __ZIP_MIN(unsigned short fieldoffset, void *elm);
void * __ZIP_MAX(unsigned short fieldoffset, void *elm);
void *
__ZIP_ZIP(unsigned short fieldoffset, void *left, void *right);
void
__ZIP_UNZIP(zip_cmp_cb cmp, unsigned short fieldoffset,
unsigned short keyoffset, const void *key,
void *h, void *l, void *r);
#ifdef __cplusplus
} /* extern "C" */
#endif

View File

@ -273,7 +273,7 @@ zipNsInsertNode(void *nsCtx, UA_Node *node, UA_NodeId *addedNodeId) {
/* Insert the node */
entry->nodeIdHash = dummy.nodeIdHash;
ZIP_INSERT(NodeTree, &ns->root, entry, UA_UInt32_random());
ZIP_INSERT(NodeTree, &ns->root, entry);
return UA_STATUSCODE_GOOD;
}
@ -302,7 +302,7 @@ zipNsReplaceNode(void *nsCtx, UA_Node *node) {
ZipContext *ns = (ZipContext*)nsCtx;
ZIP_REMOVE(NodeTree, &ns->root, oldEntry);
entry->nodeIdHash = oldEntry->nodeIdHash;
ZIP_INSERT(NodeTree, &ns->root, entry, ZIP_RANK(entry, zipfields));
ZIP_INSERT(NodeTree, &ns->root, entry);
oldEntry->deleted = true;
zipNsReleaseNode(nsCtx, oldNode);
@ -337,10 +337,11 @@ struct VisitorData {
void *visitorContext;
};
static void
nodeVisitor(NodeEntry *entry, void *data) {
static void *
nodeVisitor(void *data, NodeEntry *entry) {
struct VisitorData *d = (struct VisitorData*)data;
d->visitor(d->visitorContext, (UA_Node*)&entry->nodeId);
return NULL;
}
static void
@ -353,9 +354,10 @@ zipNsIterate(void *nsCtx, UA_NodestoreVisitor visitor,
ZIP_ITER(NodeTree, &ns->root, nodeVisitor, &d);
}
static void
deleteNodeVisitor(NodeEntry *entry, void *data) {
static void *
deleteNodeVisitor(void *data, NodeEntry *entry) {
deleteEntry(entry);
return NULL;
}
/***********************/

View File

@ -252,17 +252,18 @@ UA_Client_Subscriptions_modify_async(UA_Client *client,
cc, requestId);
}
static void
UA_MonitoredItem_delete_wrapper(UA_Client_MonitoredItem *mon, void *data) {
static void *
UA_MonitoredItem_delete_wrapper(void *data, UA_Client_MonitoredItem *mon) {
struct UA_Client_MonitoredItem_ForDelete *deleteMonitoredItem =
(struct UA_Client_MonitoredItem_ForDelete *)data;
if(deleteMonitoredItem != NULL) {
if(deleteMonitoredItem->monitoredItemId != NULL &&
(mon->monitoredItemId != *deleteMonitoredItem->monitoredItemId)) {
return;
return NULL;
}
MonitoredItem_delete(deleteMonitoredItem->client, deleteMonitoredItem->sub, mon);
}
return NULL;
}
static void
@ -505,7 +506,7 @@ ua_MonitoredItems_create(UA_Client *client, MonitoredItems_CreateData *data,
data->handlingCallbacks[i];
newMon->isEventMonitoredItem =
(request->itemsToCreate[i].itemToMonitor.attributeId == UA_ATTRIBUTEID_EVENTNOTIFIER);
ZIP_INSERT(MonitorItemsTree, &sub->monitoredItems, newMon, UA_UInt32_random());
ZIP_INSERT(MonitorItemsTree, &sub->monitoredItems, newMon);
UA_LOG_DEBUG(&client->config.logger, UA_LOGCATEGORY_CLIENT,
"Subscription %" PRIu32 " | Added a MonitoredItem with handle %" PRIu32,
@ -929,15 +930,14 @@ UA_Client_MonitoredItems_deleteSingle(UA_Client *client, UA_UInt32 subscriptionI
return retval;
}
static void
UA_MonitoredItem_change_clientHandle(UA_Client_MonitoredItem *mon, void *data) {
static void *
UA_MonitoredItem_change_clientHandle(void *data, UA_Client_MonitoredItem *mon) {
UA_MonitoredItemModifyRequest *monitoredItemModifyRequest =
(UA_MonitoredItemModifyRequest *)data;
if(monitoredItemModifyRequest != NULL) {
if(mon->monitoredItemId == monitoredItemModifyRequest->monitoredItemId) {
monitoredItemModifyRequest->requestedParameters.clientHandle = mon->clientHandle;
}
}
if(monitoredItemModifyRequest &&
mon->monitoredItemId == monitoredItemModifyRequest->monitoredItemId)
monitoredItemModifyRequest->requestedParameters.clientHandle = mon->clientHandle;
return NULL;
}
UA_ModifyMonitoredItemsResponse

View File

@ -286,7 +286,7 @@ RefTree_add(RefTree *rt, UA_NodePointer target, UA_Boolean *duplicate) {
(sizeof(RefEntry) * rt->size));
re->target = &rt->targets[rt->size];
re->targetHash = dummy.targetHash;
ZIP_INSERT(RefHead, &rt->head, re, UA_UInt32_random());
ZIP_INSERT(RefHead, &rt->head, re);
rt->size++;
return UA_STATUSCODE_GOOD;
}

View File

@ -218,6 +218,7 @@ function(ua_add_test test_path_relative)
endfunction()
ua_add_test(check_types_builtin.c)
ua_add_test(check_ziptree.c)
if(UA_ENABLE_JSON_ENCODING)
ua_add_test(check_cj5.c)

142
tests/check_ziptree.c Normal file
View File

@ -0,0 +1,142 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <check.h>
#include <stdio.h>
#include <stdlib.h>
#include "ziptree.h"
#include "assert.h"
struct treeEntry {
unsigned int key;
ZIP_ENTRY(treeEntry) pointers;
};
ZIP_HEAD(tree, treeEntry);
/* Three-way comparison of two unsigned int keys for the zip tree */
static enum ZIP_CMP
compareKeys(const void *k1, const void *k2) {
    unsigned int a = *(const unsigned int*)k1;
    unsigned int b = *(const unsigned int*)k2;
    if(a < b)
        return ZIP_CMP_LESS;
    if(a > b)
        return ZIP_CMP_MORE;
    return ZIP_CMP_EQ;
}
ZIP_FUNCTIONS(tree, treeEntry, pointers, unsigned int, key, compareKeys)
static void
checkTree(struct treeEntry *e) {
if(!e)
return;
struct treeEntry *left = e->pointers.left;
if(left)
ck_assert_uint_le(left->key, e->key);
struct treeEntry *right = e->pointers.right;
if(right)
ck_assert_uint_ge(right->key, e->key);
checkTree(left);
checkTree(right);
}
/* Insert 100 pseudo-random keys (fixed seed), then tear the tree down from
 * the root, re-checking the ordering invariant after every removal. */
START_TEST(randTree) {
    struct tree t1 = {NULL};
    srand(0);
    for(unsigned int k = 0; k < 100; k++) {
        struct treeEntry *entry =
            (struct treeEntry*)malloc(sizeof(struct treeEntry));
        entry->key = (unsigned int)rand();
        ZIP_INSERT(tree, &t1, entry);
    }
    checkTree(t1.root);
    while(t1.root != NULL) {
        checkTree(t1.root);
        struct treeEntry *l = t1.root->pointers.left;
        struct treeEntry *r = t1.root->pointers.right;
        free(t1.root);
        t1.root = ZIP_ZIP(tree, l, r);
    }
} END_TEST
START_TEST(mergeTrees) {
struct tree t1 = {NULL};
struct tree t2 = {NULL};
for(unsigned int i = 0; i < 100; i++) {
struct treeEntry *e1 = (struct treeEntry*)malloc(sizeof(struct treeEntry));
struct treeEntry *e2 = (struct treeEntry*)malloc(sizeof(struct treeEntry));
e1->key = i;
e2->key = i + 1000;
ZIP_INSERT(tree, &t1, e1);
ZIP_INSERT(tree, &t2, e2);
}
checkTree(t1.root);
checkTree(t2.root);
struct tree t3;
t3.root = ZIP_ZIP(tree, t1.root, t2.root);
checkTree(t3.root);
while(t3.root) {
checkTree(t3.root);
struct treeEntry *left = t3.root->pointers.left;
struct treeEntry *right = t3.root->pointers.right;
free(t3.root);
t3.root = ZIP_ZIP(tree, left, right);
}
} END_TEST
START_TEST(splitTree) {
struct tree t1 = {NULL};
for(unsigned int i = 0; i < 100; i++) {
struct treeEntry *e1 = (struct treeEntry*)malloc(sizeof(struct treeEntry));
struct treeEntry *e2 = (struct treeEntry*)malloc(sizeof(struct treeEntry));
e1->key = i;
e2->key = i;
ZIP_INSERT(tree, &t1, e1);
ZIP_INSERT(tree, &t1, e2);
}
checkTree(t1.root);
for(unsigned int split_key = 50; split_key < 60; split_key++) {
struct tree t2;
struct tree t3;
ZIP_UNZIP(tree, &t1, &split_key, &t2, &t3);
checkTree(t2.root);
checkTree(t3.root);
t1.root = ZIP_ZIP(tree, t2.root, t3.root);
checkTree(t1.root);
}
while(t1.root) {
checkTree(t1.root);
struct treeEntry *left = t1.root->pointers.left;
struct treeEntry *right = t1.root->pointers.right;
free(t1.root);
t1.root = ZIP_ZIP(tree, left, right);
}
} END_TEST
/* Register the three test cases with the Check runner and report failures */
int main(void) {
    TCase *tc = tcase_create("ziptree");
    tcase_add_test(tc, randTree);
    tcase_add_test(tc, mergeTrees);
    tcase_add_test(tc, splitTree);
    Suite *s = suite_create("Test ziptree library");
    suite_add_tcase(s, tc);
    SRunner *sr = srunner_create(s);
    srunner_set_fork_status(sr, CK_NOFORK);
    srunner_run_all(sr, CK_NORMAL);
    int failed = srunner_ntests_failed(sr);
    srunner_free(sr);
    return (failed == 0) ? EXIT_SUCCESS : EXIT_FAILURE;
}