[RFC PATCH v2 05/12] landlock/coalesced_hash: Implement insert
Tingmao Wang
m at maowtm.org
Sun Jul 6 15:16:46 UTC 2025
This algorithm is a slight alteration of the one on Wikipedia at the time
of writing [1]. The difference is that when we are trying to insert into
a slot that is already in use (whether by an element that doesn't
actually belong there and is merely part of a collision chain for a
different hash, or by the head of a chain, which does have the correct hash),
we move the existing element away and insert the new element in its place.
The result is that if there is some element in the hash table with a
certain hash, the slot corresponding to that hash will always be the slot
that starts the collision chain for that hash. In other words, chains
won't "mix" and if we detect that the hash of the element at the slot
we're targeting is not correct, we know that the hash table does not
contain the hash we want.
[1]: https://en.wikipedia.org/w/index.php?title=Coalesced_hashing&oldid=1214362866
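
For illustration, a caller is expected to drive these insert helpers
roughly as follows. This is only a sketch and not part of the patch: the
element type, hash function and callbacks are made up, and their
signatures are assumed to match the callback typedefs already in
coalesced_hash.h.

struct ex_elem {
	u32 key;		/* key == 0 marks an empty slot in this example */
	h_index_t next_collision;
};

static h_index_t ex_hash_elem(const void *elem, h_index_t table_size)
{
	return ((const struct ex_elem *)elem)->key % table_size;
}

static void ex_set_next_collision(void *elem, h_index_t next)
{
	((struct ex_elem *)elem)->next_collision = next;
}

static h_index_t ex_get_next_collision(const void *elem)
{
	return ((const struct ex_elem *)elem)->next_collision;
}

static bool ex_element_is_empty(const void *elem)
{
	return ((const struct ex_elem *)elem)->key == 0;
}

/* The table must start zeroed and be large enough to hold all elements. */
static int ex_build_table(struct ex_elem *table, h_index_t table_size,
			  const struct ex_elem *elems, h_index_t nr_elems)
{
	struct h_insert_scratch scratch;
	h_index_t i;
	int err;

	h_initialize(table, table_size, sizeof(*table),
		     ex_set_next_collision, ex_element_is_empty);

	err = h_init_insert_scratch(&scratch, table, table_size,
				    sizeof(*table));
	if (err)
		return err;

	for (i = 0; i < nr_elems; i++)
		h_insert(&scratch, &elems[i], ex_hash_elem,
			 ex_get_next_collision, ex_set_next_collision,
			 ex_element_is_empty);

	h_free_insert_scratch(&scratch);
	return 0;
}

In practice, the DEFINE_COALESCED_HASH_TABLE macro (extended at the end of
this patch) generates thin per-table wrappers around exactly these calls,
so users would normally call the generated _initialize and _insert
functions instead.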
This patch seems to have hit a checkpatch false positive:
ERROR: need consistent spacing around '*' (ctx:WxV)
#281: FILE: security/landlock/coalesced_hash.h:349:
+ elem_type *table, h_index_t table_size) \
^
ERROR: need consistent spacing around '*' (ctx:WxV)
#288: FILE: security/landlock/coalesced_hash.h:356:
+ struct h_insert_scratch *scratch, const elem_type *elem) \
^
Since this is kind of a niche use case, I will only report it after this
series gets out of RFC (and if the warnings still show up).
Signed-off-by: Tingmao Wang <m at maowtm.org>
---
security/landlock/coalesced_hash.h | 265 +++++++++++++++++++++++++++++
1 file changed, 265 insertions(+)
diff --git a/security/landlock/coalesced_hash.h b/security/landlock/coalesced_hash.h
index 14950915f0b5..56d32cf70d67 100644
--- a/security/landlock/coalesced_hash.h
+++ b/security/landlock/coalesced_hash.h
@@ -68,6 +68,256 @@ static inline void *h_find(const void *table, h_index_t table_size,
return NULL;
}
+static inline void h_initialize(void *table, h_index_t table_size,
+ size_t elem_size,
+ set_next_collision_t set_next_collision,
+ element_is_empty_t element_is_empty)
+{
+ h_index_t i;
+ void *elem;
+
+ WARN_ON_ONCE(array_size(table_size, elem_size) == SIZE_MAX);
+
+ for (i = 0; i < table_size; i++) {
+ elem = table + i * elem_size;
+ set_next_collision(elem, i);
+ }
+}
+
+struct h_insert_scratch {
+ /**
+ * @prev_index: For each slot which belongs in a collision chain,
+ * stores the index of the previous element in the chain. (The next
+ * index in the chain is stored in the element itself.)
+ */
+ h_index_t *prev_index;
+ /**
+	 * @next_free_index: Where to start looking for the next free slot.
+	 * This index moves from the end of the table towards the beginning
+	 * as free slots are used up.
+ */
+ h_index_t next_free_index;
+
+ /*
+	 * The following members just help us avoid passing these arguments
+ * around all the time.
+ */
+ h_index_t table_size;
+ void *table;
+ size_t elem_size;
+};
+
+static inline int h_init_insert_scratch(struct h_insert_scratch *scratch,
+ void *table, h_index_t table_size,
+ size_t elem_size)
+{
+ h_index_t i;
+
+ if (table_size == 0) {
+ memset(scratch, 0, sizeof(*scratch));
+ scratch->table = table;
+ scratch->elem_size = elem_size;
+ return 0;
+ }
+
+ if (array_size(table_size, elem_size) == SIZE_MAX)
+ return -ENOMEM;
+
+ scratch->prev_index =
+ kcalloc(table_size, sizeof(h_index_t), GFP_KERNEL_ACCOUNT);
+ if (!scratch->prev_index)
+ return -ENOMEM;
+
+ for (i = 0; i < table_size; i++)
+ scratch->prev_index[i] = i;
+
+ scratch->table_size = table_size;
+ scratch->next_free_index = table_size - 1;
+ scratch->table = table;
+ scratch->elem_size = elem_size;
+ return 0;
+}
+
+static inline void h_free_insert_scratch(struct h_insert_scratch *scratch)
+{
+ if (!scratch)
+ return;
+
+ kfree(scratch->prev_index);
+ memset(scratch, 0, sizeof(*scratch));
+}
+
+static inline h_index_t
+__h_insert_get_next_free(struct h_insert_scratch *scratch,
+ element_is_empty_t element_is_empty)
+{
+ h_index_t index_to_ret = scratch->next_free_index;
+ void *free_slot;
+
+ if (WARN_ON_ONCE(index_to_ret >= scratch->table_size)) {
+ /*
+ * We used up all the available slots already. This isn't
+ * supposed to happen with correct usage.
+ */
+ return 0;
+ }
+
+ free_slot = scratch->table + index_to_ret * scratch->elem_size;
+ while (!element_is_empty(free_slot)) {
+ if (WARN_ON_ONCE(index_to_ret == 0)) {
+ /* We unexpectedly used up all slots. */
+ return 0;
+ }
+ index_to_ret--;
+ free_slot = scratch->table + index_to_ret * scratch->elem_size;
+ }
+
+ if (index_to_ret == 0)
+ scratch->next_free_index = scratch->table_size;
+ else
+ scratch->next_free_index = index_to_ret - 1;
+
+ return index_to_ret;
+}
+
+/**
+ * __h_relocate_collision_slot - Moves the element at @idx_to_move to a
+ * free slot, making its original slot free. Any affected chain links
+ * (scratch->prev_index and adjacent elements' next_collision) are updated.
+ *
+ * Returns the new index of the moved element.
+ */
+static inline h_index_t
+__h_relocate_collision_slot(struct h_insert_scratch *scratch,
+ h_index_t idx_to_move,
+ get_next_collision_t get_next_collision,
+ set_next_collision_t set_next_collision,
+ element_is_empty_t element_is_empty)
+{
+ h_index_t move_to = __h_insert_get_next_free(scratch, element_is_empty);
+ void *move_to_elem = scratch->table + move_to * scratch->elem_size;
+ void *move_target_elem =
+ scratch->table + idx_to_move * scratch->elem_size;
+ h_index_t old_next = get_next_collision(move_target_elem);
+ h_index_t old_prev = scratch->prev_index[idx_to_move];
+ void *old_prev_elem = scratch->table + old_prev * scratch->elem_size;
+
+ memcpy(move_to_elem, move_target_elem, scratch->elem_size);
+
+ /*
+ * The logic here is essentially hlist_replace_rcu, except in the
+	 * hlist case the tail has next == NULL, whereas in our case the tail
+ * has next set to itself.
+ */
+
+ /*
+	 * If the moved element already pointed to another element, that
+	 * next_collision has been memcpy'd across and is still correct.
+ */
+ if (old_next == idx_to_move)
+ /*
+		 * Need to fix the tail pointer - it points to itself. Its own
+ * prev is correct already.
+ */
+ set_next_collision(move_to_elem, move_to);
+ else
+ /*
+ * the next_collision would have been memcpy'd over, but we need to
+ * fix that next element's prev
+ */
+ scratch->prev_index[old_next] = move_to;
+
+	if (old_prev == idx_to_move) {
+		/* The moved element is a head. Fix its prev. */
+		scratch->prev_index[move_to] = move_to;
+	} else {
+ /*
+ * Need to make the moved element's prev point to it, and copy over
+ * the prev pointer.
+ */
+ set_next_collision(old_prev_elem, move_to);
+ scratch->prev_index[move_to] = old_prev;
+ }
+
+ scratch->prev_index[idx_to_move] = idx_to_move;
+ memset(move_target_elem, 0, scratch->elem_size);
+ set_next_collision(move_target_elem, idx_to_move);
+
+ return move_to;
+}
+
+static inline void h_insert(struct h_insert_scratch *scratch, const void *elem,
+ hash_element_t hash_elem,
+ get_next_collision_t get_next_collision,
+ set_next_collision_t set_next_collision,
+ element_is_empty_t element_is_empty)
+{
+ h_index_t target_idx, target_hash, moved_to;
+ void *target_elem;
+
+ if (WARN_ON_ONCE(!scratch->table || !scratch->table_size))
+ return;
+ if (WARN_ON_ONCE(scratch->next_free_index >= scratch->table_size))
+ /*
+ * We used up all the available slots already. This isn't
+ * supposed to happen with correct usage.
+ */
+ return;
+ if (WARN_ON_ONCE(element_is_empty(elem)))
+ return;
+
+ /*
+ * The general logic here is basically that we always insert the new
+ * element at its rightful place, but we move any existing element in
+ * that place around. Consider these cases:
+ *
+ * 1. target slot is empty - we can just insert the new element.
+ *
+ * 2. target slot is occupied by an element that is in a collision
+ * chain (but not the head).
+ * In this case, we can just move that existing element to a free
+ * slot, and insert the new element in its rightful place. This
+	 *    will start a new chain (the fact that the target slot is not a
+	 *    head means that there is no existing chain with this hash).
+ *
+ * 3. target slot is occupied by the head of a chain (i.e. that
+ * existing element is already in its "rightful place"). In this
+ * case, we can still move that existing element to a free slot,
+	 *    slot, and steal its current place to use for the new element. The
+ * new element will become the new head of the chain, and will
+ * point to the existing element.
+ */
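+
+	/*
+	 * Illustrative example (table_size == 8; free slots are handed out
+	 * from the end of the table):
+	 *
+	 *  - insert A with hash 3: slot 3 is empty, so A simply goes into
+	 *    slot 3 (case 1).
+	 *  - insert B with hash 3: slot 3 holds the head of hash 3's chain
+	 *    (case 3), so A is relocated to slot 7 and B takes slot 3,
+	 *    pointing at slot 7.
+	 *  - insert C with hash 7: slot 7 holds A, which is in a chain but
+	 *    is not a head (case 2), so A is relocated again (to slot 6,
+	 *    with B re-pointed at it) and C takes slot 7, starting a new
+	 *    chain.
+	 */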
+
+ target_idx = hash_elem(elem, scratch->table_size);
+ if (WARN_ON_ONCE(target_idx >= scratch->table_size))
+ return;
+ target_elem = scratch->table + target_idx * scratch->elem_size;
+
+ if (element_is_empty(target_elem)) {
+ /*
+ * Simple case - just insert it. scratch->prev_index is already
+ * correctly initialized.
+ */
+ memcpy(target_elem, elem, scratch->elem_size);
+ set_next_collision(target_elem, target_idx);
+ } else {
+ target_hash = hash_elem(target_elem, scratch->table_size);
+ moved_to = __h_relocate_collision_slot(scratch, target_idx,
+ get_next_collision,
+ set_next_collision,
+ element_is_empty);
+ memcpy(target_elem, elem, scratch->elem_size);
+ if (target_hash == target_idx) {
+			/* The new element becomes the head of the existing chain. */
+ set_next_collision(target_elem, moved_to);
+ WARN_ON_ONCE(scratch->prev_index[moved_to] != moved_to);
+ scratch->prev_index[moved_to] = target_idx;
+ } else {
+ /* We are starting a new chain. */
+ set_next_collision(target_elem, target_idx);
+ }
+ }
+}
+
/**
 * DEFINE_COALESCED_HASH_TABLE - Define a set of functions to manipulate a
* coalesced hash table holding elements of type @elem_type.
@@ -127,4 +377,19 @@ static inline void *h_find(const void *table, h_index_t table_size,
table_func_prefix##_get_next_collision, \
table_func_prefix##_compare_elem, \
table_func_prefix##_element_is_empty); \
+ } \
+ static inline void table_func_prefix##_initialize( \
+ elem_type *table, h_index_t table_size) \
+ { \
+ h_initialize(table, table_size, sizeof(elem_type), \
+ table_func_prefix##_set_next_collision, \
+ table_func_prefix##_element_is_empty); \
+ } \
+ static inline void table_func_prefix##_insert( \
+ struct h_insert_scratch *scratch, const elem_type *elem) \
+ { \
+ h_insert(scratch, elem, table_func_prefix##_hash_elem, \
+ table_func_prefix##_get_next_collision, \
+ table_func_prefix##_set_next_collision, \
+ table_func_prefix##_element_is_empty); \
}
--
2.49.0