refactor: rename self

Branch: master
Author: Sean McBride
Parent: 7f931c5ebb
Commit: 7d91a9cfc0

@ -10,6 +10,6 @@ struct admissions_info {
uint64_t relative_deadline; /* Relative deadline in cycles. This is duplicated state */
};
void admissions_info_initialize(struct admissions_info *self, int percentile, uint64_t expected_execution,
void admissions_info_initialize(struct admissions_info *admissions_info, int percentile, uint64_t expected_execution,
uint64_t relative_deadline);
void admissions_info_update(struct admissions_info *self, uint64_t execution_duration);
void admissions_info_update(struct admissions_info *admissions_info, uint64_t execution_duration);

@ -27,4 +27,4 @@ struct http_request {
bool message_end; /* boolean flag set when body processing is complete */
};
void http_request_print(struct http_request *self);
void http_request_print(struct http_request *http_request);

@ -159,14 +159,14 @@ module_release(struct module *module)
}
static inline struct wasm_stack *
module_allocate_stack(struct module *self)
module_allocate_stack(struct module *module)
{
assert(self != NULL);
assert(module != NULL);
struct wasm_stack *stack = wasm_stack_pool_remove_nolock(&self->pools[worker_thread_idx].stack);
struct wasm_stack *stack = wasm_stack_pool_remove_nolock(&module->pools[worker_thread_idx].stack);
if (stack == NULL) {
stack = wasm_stack_new(self->stack_size);
stack = wasm_stack_new(module->stack_size);
if (unlikely(stack == NULL)) return NULL;
}
@ -174,10 +174,10 @@ module_allocate_stack(struct module *self)
}
static inline void
module_free_stack(struct module *self, struct wasm_stack *stack)
module_free_stack(struct module *module, struct wasm_stack *stack)
{
wasm_stack_reinit(stack);
wasm_stack_pool_add_nolock(&self->pools[worker_thread_idx].stack, stack);
wasm_stack_pool_add_nolock(&module->pools[worker_thread_idx].stack, stack);
}
static inline struct wasm_memory *

@ -12,122 +12,122 @@
/**
* Initializes perf window
* @param self
* @param perf_window
*/
static inline void
perf_window_initialize(struct perf_window *self)
perf_window_initialize(struct perf_window *perf_window)
{
assert(self != NULL);
assert(perf_window != NULL);
LOCK_INIT(&self->lock);
self->count = 0;
memset(&self->by_duration, 0, sizeof(struct execution_node) * PERF_WINDOW_BUFFER_SIZE);
memset(&self->by_termination, 0, sizeof(uint16_t) * PERF_WINDOW_BUFFER_SIZE);
LOCK_INIT(&perf_window->lock);
perf_window->count = 0;
memset(&perf_window->by_duration, 0, sizeof(struct execution_node) * PERF_WINDOW_BUFFER_SIZE);
memset(&perf_window->by_termination, 0, sizeof(uint16_t) * PERF_WINDOW_BUFFER_SIZE);
}
/**
* Swaps two execution nodes in the by_duration array, including updating the indices in the by_termination circular
* buffer
* @param self
* @param perf_window
* @param first_by_duration_idx
* @param second_by_duration_idx
*/
static inline void
perf_window_swap(struct perf_window *self, uint16_t first_by_duration_idx, uint16_t second_by_duration_idx)
perf_window_swap(struct perf_window *perf_window, uint16_t first_by_duration_idx, uint16_t second_by_duration_idx)
{
assert(LOCK_IS_LOCKED(&self->lock));
assert(self != NULL);
assert(LOCK_IS_LOCKED(&perf_window->lock));
assert(perf_window != NULL);
assert(first_by_duration_idx >= 0 && first_by_duration_idx < PERF_WINDOW_BUFFER_SIZE);
assert(second_by_duration_idx >= 0 && second_by_duration_idx < PERF_WINDOW_BUFFER_SIZE);
uint16_t first_by_termination_idx = self->by_duration[first_by_duration_idx].by_termination_idx;
uint16_t second_by_termination_idx = self->by_duration[second_by_duration_idx].by_termination_idx;
uint16_t first_by_termination_idx = perf_window->by_duration[first_by_duration_idx].by_termination_idx;
uint16_t second_by_termination_idx = perf_window->by_duration[second_by_duration_idx].by_termination_idx;
/* The execution node's by_termination_idx points to a by_termination cell equal to its own by_duration index */
assert(self->by_termination[first_by_termination_idx] == first_by_duration_idx);
assert(self->by_termination[second_by_termination_idx] == second_by_duration_idx);
assert(perf_window->by_termination[first_by_termination_idx] == first_by_duration_idx);
assert(perf_window->by_termination[second_by_termination_idx] == second_by_duration_idx);
uint64_t first_execution_time = self->by_duration[first_by_duration_idx].execution_time;
uint64_t second_execution_time = self->by_duration[second_by_duration_idx].execution_time;
uint64_t first_execution_time = perf_window->by_duration[first_by_duration_idx].execution_time;
uint64_t second_execution_time = perf_window->by_duration[second_by_duration_idx].execution_time;
/* Swap Indices in Buffer*/
self->by_termination[first_by_termination_idx] = second_by_duration_idx;
self->by_termination[second_by_termination_idx] = first_by_duration_idx;
perf_window->by_termination[first_by_termination_idx] = second_by_duration_idx;
perf_window->by_termination[second_by_termination_idx] = first_by_duration_idx;
/* Swap by_termination_idx */
struct execution_node tmp_node = self->by_duration[first_by_duration_idx];
self->by_duration[first_by_duration_idx] = self->by_duration[second_by_duration_idx];
self->by_duration[second_by_duration_idx] = tmp_node;
struct execution_node tmp_node = perf_window->by_duration[first_by_duration_idx];
perf_window->by_duration[first_by_duration_idx] = perf_window->by_duration[second_by_duration_idx];
perf_window->by_duration[second_by_duration_idx] = tmp_node;
/* The circular by_termination indices should always point to the same execution times across all swaps */
assert(self->by_duration[self->by_termination[first_by_termination_idx]].execution_time
assert(perf_window->by_duration[perf_window->by_termination[first_by_termination_idx]].execution_time
== first_execution_time);
assert(self->by_duration[self->by_termination[second_by_termination_idx]].execution_time
assert(perf_window->by_duration[perf_window->by_termination[second_by_termination_idx]].execution_time
== second_execution_time);
}
/**
* Adds a new value to the perf window
* Not intended to be called directly!
* @param self
* @param perf_window
* @param value
*/
static inline void
perf_window_add(struct perf_window *self, uint64_t value)
perf_window_add(struct perf_window *perf_window, uint64_t value)
{
assert(self != NULL);
assert(perf_window != NULL);
uint16_t idx_of_oldest;
bool check_up;
if (unlikely(!LOCK_IS_LOCKED(&self->lock))) panic("lock not held when calling perf_window_add\n");
if (unlikely(!LOCK_IS_LOCKED(&perf_window->lock))) panic("lock not held when calling perf_window_add\n");
/* A successful invocation should run for a non-zero amount of time */
assert(value > 0);
/* If count is 0, then fill entire array with initial execution times */
if (self->count == 0) {
if (perf_window->count == 0) {
for (int i = 0; i < PERF_WINDOW_BUFFER_SIZE; i++) {
self->by_termination[i] = i;
self->by_duration[i] = (struct execution_node){ .execution_time = value,
perf_window->by_termination[i] = i;
perf_window->by_duration[i] = (struct execution_node){ .execution_time = value,
.by_termination_idx = i };
}
self->count = PERF_WINDOW_BUFFER_SIZE;
perf_window->count = PERF_WINDOW_BUFFER_SIZE;
goto done;
}
/* Otherwise, replace the oldest value, and then sort */
idx_of_oldest = self->by_termination[self->count % PERF_WINDOW_BUFFER_SIZE];
check_up = value > self->by_duration[idx_of_oldest].execution_time;
idx_of_oldest = perf_window->by_termination[perf_window->count % PERF_WINDOW_BUFFER_SIZE];
check_up = value > perf_window->by_duration[idx_of_oldest].execution_time;
self->by_duration[idx_of_oldest].execution_time = value;
perf_window->by_duration[idx_of_oldest].execution_time = value;
if (check_up) {
for (uint16_t i = idx_of_oldest;
i + 1 < PERF_WINDOW_BUFFER_SIZE
&& self->by_duration[i + 1].execution_time < self->by_duration[i].execution_time;
&& perf_window->by_duration[i + 1].execution_time < perf_window->by_duration[i].execution_time;
i++) {
perf_window_swap(self, i, i + 1);
perf_window_swap(perf_window, i, i + 1);
}
} else {
for (int i = idx_of_oldest;
i - 1 >= 0 && self->by_duration[i - 1].execution_time > self->by_duration[i].execution_time; i--) {
perf_window_swap(self, i, i - 1);
i - 1 >= 0 && perf_window->by_duration[i - 1].execution_time > perf_window->by_duration[i].execution_time; i--) {
perf_window_swap(perf_window, i, i - 1);
}
}
/* The idx that we replaced should still point to the same value */
assert(self->by_duration[self->by_termination[self->count % PERF_WINDOW_BUFFER_SIZE]].execution_time == value);
assert(perf_window->by_duration[perf_window->by_termination[perf_window->count % PERF_WINDOW_BUFFER_SIZE]].execution_time == value);
/* The by_duration array should be ordered by execution time */
#ifndef NDEBUG
for (int i = 1; i < PERF_WINDOW_BUFFER_SIZE; i++) {
assert(self->by_duration[i - 1].execution_time <= self->by_duration[i].execution_time);
assert(perf_window->by_duration[i - 1].execution_time <= perf_window->by_duration[i].execution_time);
}
#endif
self->count++;
perf_window->count++;
done:
return;
@ -135,22 +135,22 @@ done:
/**
* Returns pXX execution time
* @param self
* @param perf_window
* @param percentile represented by int between 50 and 99
* @param precomputed_index memoized index for quick lookup when by_duration is full
* @returns execution time
*/
static inline uint64_t
perf_window_get_percentile(struct perf_window *self, int percentile, int precomputed_index)
perf_window_get_percentile(struct perf_window *perf_window, int percentile, int precomputed_index)
{
assert(self != NULL);
assert(perf_window != NULL);
assert(percentile >= 50 && percentile <= 99);
int size = self->count;
int size = perf_window->count;
assert(size > 0);
if (likely(size >= PERF_WINDOW_BUFFER_SIZE)) return self->by_duration[precomputed_index].execution_time;
if (likely(size >= PERF_WINDOW_BUFFER_SIZE)) return perf_window->by_duration[precomputed_index].execution_time;
return self->by_duration[size * percentile / 100].execution_time;
return perf_window->by_duration[size * percentile / 100].execution_time;
}
/**
@ -158,9 +158,9 @@ perf_window_get_percentile(struct perf_window *self, int percentile, int precomp
* @returns total count
*/
static inline uint64_t
perf_window_get_count(struct perf_window *self)
perf_window_get_count(struct perf_window *perf_window)
{
assert(self != NULL);
assert(perf_window != NULL);
return self->count;
return perf_window->count;
}
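
For orientation, a minimal caller-side sketch of the renamed perf_window API. The header name is an assumption; the LOCK_* macros, PERF_WINDOW_BUFFER_SIZE, and the precomputed-index arithmetic mirror the admissions_info code later in this commit.

#include <stdint.h>
#include "perf_window.h" /* assumed header name */

/* Record one measured duration and read back an approximate p90.
 * Assumes perf_window_initialize() has already been called;
 * perf_window_add() panics unless the caller holds the lock. */
static uint64_t
perf_window_example(struct perf_window *perf_window, uint64_t duration)
{
        uint64_t p90;

        LOCK_LOCK(&perf_window->lock);
        perf_window_add(perf_window, duration);
        p90 = perf_window_get_percentile(perf_window, 90, PERF_WINDOW_BUFFER_SIZE * 90 / 100);
        LOCK_UNLOCK(&perf_window->lock);

        return p90;
}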

@ -37,36 +37,36 @@ struct priority_queue {
* @returns the highest priority value in the queue or ULONG_MAX if empty
*/
static inline uint64_t
priority_queue_peek(struct priority_queue *self)
priority_queue_peek(struct priority_queue *priority_queue)
{
return self->highest_priority;
return priority_queue->highest_priority;
}
static inline void
priority_queue_update_highest_priority(struct priority_queue *self, const uint64_t priority)
priority_queue_update_highest_priority(struct priority_queue *priority_queue, const uint64_t priority)
{
self->highest_priority = priority;
priority_queue->highest_priority = priority;
}
/**
* Adds a value to the end of the binary heap
* @param self the priority queue
* @param priority_queue the priority queue
* @param new_item the value we are adding
* @return 0 on success. -ENOSPC when priority queue is full
*/
static inline int
priority_queue_append(struct priority_queue *self, void *new_item)
priority_queue_append(struct priority_queue *priority_queue, void *new_item)
{
assert(self != NULL);
assert(priority_queue != NULL);
assert(new_item != NULL);
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
int rc;
if (unlikely(self->size + 1 > self->capacity)) panic("PQ overflow");
if (unlikely(self->size + 1 == self->capacity)) goto err_enospc;
self->items[++self->size] = new_item;
if (unlikely(priority_queue->size + 1 > priority_queue->capacity)) panic("PQ overflow");
if (unlikely(priority_queue->size + 1 == priority_queue->capacity)) goto err_enospc;
priority_queue->items[++priority_queue->size] = new_item;
rc = 0;
done:
@ -78,71 +78,76 @@ err_enospc:
/**
* Checks if a priority queue is empty
* @param self the priority queue to check
* @param priority_queue the priority queue to check
* @returns true if empty, false otherwise
*/
static inline bool
priority_queue_is_empty(struct priority_queue *self)
priority_queue_is_empty(struct priority_queue *priority_queue)
{
assert(self != NULL);
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(priority_queue != NULL);
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
return self->size == 0;
return priority_queue->size == 0;
}
/**
* Shifts an appended value upwards to restore heap structure property
* @param self the priority queue
* @param priority_queue the priority queue
*/
static inline void
priority_queue_percolate_up(struct priority_queue *self)
priority_queue_percolate_up(struct priority_queue *priority_queue)
{
assert(self != NULL);
assert(self->get_priority_fn != NULL);
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(priority_queue != NULL);
assert(priority_queue->get_priority_fn != NULL);
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
/* If there's only one element, set memoized lookup and early out */
if (self->size == 1) {
priority_queue_update_highest_priority(self, self->get_priority_fn(self->items[1]));
if (priority_queue->size == 1) {
priority_queue_update_highest_priority(priority_queue,
priority_queue->get_priority_fn(priority_queue->items[1]));
return;
}
for (int i = self->size;
i / 2 != 0 && self->get_priority_fn(self->items[i]) < self->get_priority_fn(self->items[i / 2]); i /= 2) {
assert(self->get_priority_fn(self->items[i]) != ULONG_MAX);
void *temp = self->items[i / 2];
self->items[i / 2] = self->items[i];
self->items[i] = temp;
for (int i = priority_queue->size; i / 2 != 0
&& priority_queue->get_priority_fn(priority_queue->items[i])
< priority_queue->get_priority_fn(priority_queue->items[i / 2]);
i /= 2) {
assert(priority_queue->get_priority_fn(priority_queue->items[i]) != ULONG_MAX);
void *temp = priority_queue->items[i / 2];
priority_queue->items[i / 2] = priority_queue->items[i];
priority_queue->items[i] = temp;
/* If percolated to highest priority, update highest priority */
if (i / 2 == 1) priority_queue_update_highest_priority(self, self->get_priority_fn(self->items[1]));
if (i / 2 == 1)
priority_queue_update_highest_priority(priority_queue, priority_queue->get_priority_fn(
priority_queue->items[1]));
}
}
/**
* Returns the index of a node's smallest child
* @param self the priority queue
* @param priority_queue the priority queue
* @param parent_index
* @returns the index of the smallest child
*/
static inline int
priority_queue_find_smallest_child(struct priority_queue *self, const int parent_index)
priority_queue_find_smallest_child(struct priority_queue *priority_queue, const int parent_index)
{
assert(self != NULL);
assert(parent_index >= 1 && parent_index <= self->size);
assert(self->get_priority_fn != NULL);
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(priority_queue != NULL);
assert(parent_index >= 1 && parent_index <= priority_queue->size);
assert(priority_queue->get_priority_fn != NULL);
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
int left_child_index = 2 * parent_index;
int right_child_index = 2 * parent_index + 1;
assert(self->items[left_child_index] != NULL);
assert(priority_queue->items[left_child_index] != NULL);
int smallest_child_idx;
/* If we don't have a right child or the left child is smaller, return it */
if (right_child_index > self->size) {
if (right_child_index > priority_queue->size) {
smallest_child_idx = left_child_index;
} else if (self->get_priority_fn(self->items[left_child_index])
< self->get_priority_fn(self->items[right_child_index])) {
} else if (priority_queue->get_priority_fn(priority_queue->items[left_child_index])
< priority_queue->get_priority_fn(priority_queue->items[right_child_index])) {
smallest_child_idx = left_child_index;
} else {
/* Otherwise, return the right child */
@ -155,29 +160,29 @@ priority_queue_find_smallest_child(struct priority_queue *self, const int parent
/**
* Shifts the top of the heap downwards. Used after placing the last value at
* the top
* @param self the priority queue
* @param priority_queue the priority queue
*/
static inline void
priority_queue_percolate_down(struct priority_queue *self, int parent_index)
priority_queue_percolate_down(struct priority_queue *priority_queue, int parent_index)
{
assert(self != NULL);
assert(self->get_priority_fn != NULL);
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(priority_queue != NULL);
assert(priority_queue->get_priority_fn != NULL);
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
assert(!listener_thread_is_running());
bool update_highest_value = parent_index == 1;
int left_child_index = 2 * parent_index;
while (left_child_index >= 2 && left_child_index <= self->size) {
int smallest_child_index = priority_queue_find_smallest_child(self, parent_index);
while (left_child_index >= 2 && left_child_index <= priority_queue->size) {
int smallest_child_index = priority_queue_find_smallest_child(priority_queue, parent_index);
/* Once the parent is equal to or less than its smallest child, break; */
if (self->get_priority_fn(self->items[parent_index])
<= self->get_priority_fn(self->items[smallest_child_index]))
if (priority_queue->get_priority_fn(priority_queue->items[parent_index])
<= priority_queue->get_priority_fn(priority_queue->items[smallest_child_index]))
break;
/* Otherwise, swap and continue down the tree */
void *temp = self->items[smallest_child_index];
self->items[smallest_child_index] = self->items[parent_index];
self->items[parent_index] = temp;
void *temp = priority_queue->items[smallest_child_index];
priority_queue->items[smallest_child_index] = priority_queue->items[parent_index];
priority_queue->items[parent_index] = temp;
parent_index = smallest_child_index;
left_child_index = 2 * parent_index;
@ -185,10 +190,11 @@ priority_queue_percolate_down(struct priority_queue *self, int parent_index)
/* Update memoized value if we touched the head */
if (update_highest_value) {
if (!priority_queue_is_empty(self)) {
priority_queue_update_highest_priority(self, self->get_priority_fn(self->items[1]));
if (!priority_queue_is_empty(priority_queue)) {
priority_queue_update_highest_priority(priority_queue, priority_queue->get_priority_fn(
priority_queue->items[1]));
} else {
priority_queue_update_highest_priority(self, ULONG_MAX);
priority_queue_update_highest_priority(priority_queue, ULONG_MAX);
}
}
}
@ -198,30 +204,32 @@ priority_queue_percolate_down(struct priority_queue *self, int parent_index)
********************/
/**
* @param self - the priority queue we want to add to
* @param priority_queue - the priority queue we want to add to
* @param dequeued_element a pointer to set to the dequeued element
* @param target_deadline the deadline that the request must be earlier than in order to dequeue
* @returns RC 0 if successfully set dequeued_element, -ENOENT if empty or if none meet target_deadline
*/
static inline int
priority_queue_dequeue_if_earlier_nolock(struct priority_queue *self, void **dequeued_element, uint64_t target_deadline)
priority_queue_dequeue_if_earlier_nolock(struct priority_queue *priority_queue, void **dequeued_element,
uint64_t target_deadline)
{
assert(self != NULL);
assert(priority_queue != NULL);
assert(dequeued_element != NULL);
assert(self->get_priority_fn != NULL);
assert(priority_queue->get_priority_fn != NULL);
assert(!listener_thread_is_running());
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
int return_code;
/* If the dequeue is not higher priority (earlier timestamp) than target_deadline, return immediately */
if (priority_queue_is_empty(self) || self->highest_priority >= target_deadline) goto err_enoent;
if (priority_queue_is_empty(priority_queue) || priority_queue->highest_priority >= target_deadline)
goto err_enoent;
*dequeued_element = self->items[1];
self->items[1] = self->items[self->size];
self->items[self->size--] = NULL;
*dequeued_element = priority_queue->items[1];
priority_queue->items[1] = priority_queue->items[priority_queue->size];
priority_queue->items[priority_queue->size--] = NULL;
priority_queue_percolate_down(self, 1);
priority_queue_percolate_down(priority_queue, 1);
return_code = 0;
done:
@ -232,19 +240,20 @@ err_enoent:
}
/**
* @param self - the priority queue we want to add to
* @param priority_queue - the priority queue we want to add to
* @param dequeued_element a pointer to set to the dequeued element
* @param target_deadline the deadline that the request must be earlier than in order to dequeue
* @returns RC 0 if successfully set dequeued_element, -ENOENT if empty or if none meet target_deadline
*/
static inline int
priority_queue_dequeue_if_earlier(struct priority_queue *self, void **dequeued_element, uint64_t target_deadline)
priority_queue_dequeue_if_earlier(struct priority_queue *priority_queue, void **dequeued_element,
uint64_t target_deadline)
{
int return_code;
LOCK_LOCK(&self->lock);
return_code = priority_queue_dequeue_if_earlier_nolock(self, dequeued_element, target_deadline);
LOCK_UNLOCK(&self->lock);
LOCK_LOCK(&priority_queue->lock);
return_code = priority_queue_dequeue_if_earlier_nolock(priority_queue, dequeued_element, target_deadline);
LOCK_UNLOCK(&priority_queue->lock);
return return_code;
}
@ -264,79 +273,79 @@ priority_queue_initialize(size_t capacity, bool use_lock, priority_queue_get_pri
/* Add one to capacity because this data structure ignores the element at 0 */
size_t one_based_capacity = capacity + 1;
struct priority_queue *self = (struct priority_queue *)calloc(sizeof(struct priority_queue)
+ sizeof(void *) * one_based_capacity,
1);
struct priority_queue *priority_queue = (struct priority_queue *)calloc(sizeof(struct priority_queue)
+ sizeof(void *) * one_based_capacity,
1);
/* We're assuming a min-heap implementation, so set to largest possible value */
priority_queue_update_highest_priority(self, ULONG_MAX);
self->size = 0;
self->capacity = one_based_capacity; // Add one because we skip element 0
self->get_priority_fn = get_priority_fn;
self->use_lock = use_lock;
priority_queue_update_highest_priority(priority_queue, ULONG_MAX);
priority_queue->size = 0;
priority_queue->capacity = one_based_capacity; // Add one because we skip element 0
priority_queue->get_priority_fn = get_priority_fn;
priority_queue->use_lock = use_lock;
if (use_lock) LOCK_INIT(&self->lock);
if (use_lock) LOCK_INIT(&priority_queue->lock);
return self;
return priority_queue;
}
/**
* Free the Priority Queue Data structure
* @param self the priority_queue to free
* @param priority_queue the priority_queue to free
*/
static inline void
priority_queue_free(struct priority_queue *self)
priority_queue_free(struct priority_queue *priority_queue)
{
assert(self != NULL);
assert(priority_queue != NULL);
free(self);
free(priority_queue);
}
/**
* @param self the priority_queue
* @param priority_queue the priority_queue
* @returns the number of elements in the priority queue
*/
static inline int
priority_queue_length_nolock(struct priority_queue *self)
priority_queue_length_nolock(struct priority_queue *priority_queue)
{
assert(self != NULL);
assert(priority_queue != NULL);
assert(!listener_thread_is_running());
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
return self->size;
return priority_queue->size;
}
/**
* @param self the priority_queue
* @param priority_queue the priority_queue
* @returns the number of elements in the priority queue
*/
static inline int
priority_queue_length(struct priority_queue *self)
priority_queue_length(struct priority_queue *priority_queue)
{
LOCK_LOCK(&self->lock);
int size = priority_queue_length_nolock(self);
LOCK_UNLOCK(&self->lock);
LOCK_LOCK(&priority_queue->lock);
int size = priority_queue_length_nolock(priority_queue);
LOCK_UNLOCK(&priority_queue->lock);
return size;
}
/**
* @param self - the priority queue we want to add to
* @param priority_queue - the priority queue we want to add to
* @param value - the value we want to add
* @returns 0 on success. -ENOSPC on full.
*/
static inline int
priority_queue_enqueue_nolock(struct priority_queue *self, void *value)
priority_queue_enqueue_nolock(struct priority_queue *priority_queue, void *value)
{
assert(self != NULL);
assert(priority_queue != NULL);
assert(value != NULL);
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
int rc;
if (unlikely(priority_queue_append(self, value) == -ENOSPC)) goto err_enospc;
if (unlikely(priority_queue_append(priority_queue, value) == -ENOSPC)) goto err_enospc;
priority_queue_percolate_up(self);
priority_queue_percolate_up(priority_queue);
rc = 0;
done:
@ -347,40 +356,40 @@ err_enospc:
}
/**
* @param self - the priority queue we want to add to
* @param priority_queue - the priority queue we want to add to
* @param value - the value we want to add
* @returns 0 on success. -ENOSPC on full.
*/
static inline int
priority_queue_enqueue(struct priority_queue *self, void *value)
priority_queue_enqueue(struct priority_queue *priority_queue, void *value)
{
int rc;
LOCK_LOCK(&self->lock);
rc = priority_queue_enqueue_nolock(self, value);
LOCK_UNLOCK(&self->lock);
LOCK_LOCK(&priority_queue->lock);
rc = priority_queue_enqueue_nolock(priority_queue, value);
LOCK_UNLOCK(&priority_queue->lock);
return rc;
}
/**
* @param self - the priority queue we want to delete from
* @param priority_queue - the priority queue we want to delete from
* @param value - the value we want to delete
* @returns 0 on success. -1 on not found
*/
static inline int
priority_queue_delete_nolock(struct priority_queue *self, void *value)
priority_queue_delete_nolock(struct priority_queue *priority_queue, void *value)
{
assert(self != NULL);
assert(priority_queue != NULL);
assert(value != NULL);
assert(!listener_thread_is_running());
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
for (int i = 1; i <= self->size; i++) {
if (self->items[i] == value) {
self->items[i] = self->items[self->size];
self->items[self->size--] = NULL;
priority_queue_percolate_down(self, i);
for (int i = 1; i <= priority_queue->size; i++) {
if (priority_queue->items[i] == value) {
priority_queue->items[i] = priority_queue->items[priority_queue->size];
priority_queue->items[priority_queue->size--] = NULL;
priority_queue_percolate_down(priority_queue, i);
return 0;
}
}
@ -389,64 +398,64 @@ priority_queue_delete_nolock(struct priority_queue *self, void *value)
}
/**
* @param self - the priority queue we want to delete from
* @param priority_queue - the priority queue we want to delete from
* @param value - the value we want to delete
* @returns 0 on success. -1 on not found
*/
static inline int
priority_queue_delete(struct priority_queue *self, void *value)
priority_queue_delete(struct priority_queue *priority_queue, void *value)
{
int rc;
LOCK_LOCK(&self->lock);
rc = priority_queue_delete_nolock(self, value);
LOCK_UNLOCK(&self->lock);
LOCK_LOCK(&priority_queue->lock);
rc = priority_queue_delete_nolock(priority_queue, value);
LOCK_UNLOCK(&priority_queue->lock);
return rc;
}
/**
* @param self - the priority queue we want to add to
* @param priority_queue - the priority queue we want to add to
* @param dequeued_element a pointer to set to the dequeued element
* @returns RC 0 if successfully set dequeued_element, -ENOENT if empty
*/
static inline int
priority_queue_dequeue(struct priority_queue *self, void **dequeued_element)
priority_queue_dequeue(struct priority_queue *priority_queue, void **dequeued_element)
{
return priority_queue_dequeue_if_earlier(self, dequeued_element, UINT64_MAX);
return priority_queue_dequeue_if_earlier(priority_queue, dequeued_element, UINT64_MAX);
}
/**
* @param self - the priority queue we want to add to
* @param priority_queue - the priority queue we want to add to
* @param dequeued_element a pointer to set to the dequeued element
* @returns RC 0 if successfully set dequeued_element, -ENOENT if empty
*/
static inline int
priority_queue_dequeue_nolock(struct priority_queue *self, void **dequeued_element)
priority_queue_dequeue_nolock(struct priority_queue *priority_queue, void **dequeued_element)
{
return priority_queue_dequeue_if_earlier_nolock(self, dequeued_element, UINT64_MAX);
return priority_queue_dequeue_if_earlier_nolock(priority_queue, dequeued_element, UINT64_MAX);
}
/**
* Returns the top of the priority queue without removing it
* @param self - the priority queue we want to add to
* @param priority_queue - the priority queue we want to add to
* @param dequeued_element a pointer to set to the top element
* @returns RC 0 if successfully set dequeued_element, -ENOENT if empty
*/
static inline int
priority_queue_top_nolock(struct priority_queue *self, void **dequeued_element)
priority_queue_top_nolock(struct priority_queue *priority_queue, void **dequeued_element)
{
assert(self != NULL);
assert(priority_queue != NULL);
assert(dequeued_element != NULL);
assert(self->get_priority_fn != NULL);
assert(priority_queue->get_priority_fn != NULL);
assert(!listener_thread_is_running());
assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock));
assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
int return_code;
if (priority_queue_is_empty(self)) goto err_enoent;
if (priority_queue_is_empty(priority_queue)) goto err_enoent;
*dequeued_element = self->items[1];
*dequeued_element = priority_queue->items[1];
return_code = 0;
done:
@ -458,18 +467,18 @@ err_enoent:
/**
* Returns the top of the priority queue without removing it
* @param self - the priority queue we want to add to
* @param priority_queue - the priority queue we want to add to
* @param dequeued_element a pointer to set to the top element
* @returns RC 0 if successfully set dequeued_element, -ENOENT if empty
*/
static inline int
priority_queue_top(struct priority_queue *self, void **dequeued_element)
priority_queue_top(struct priority_queue *priority_queue, void **dequeued_element)
{
int return_code;
LOCK_LOCK(&self->lock);
return_code = priority_queue_top_nolock(self, dequeued_element);
LOCK_UNLOCK(&self->lock);
LOCK_LOCK(&priority_queue->lock);
return_code = priority_queue_top_nolock(priority_queue, dequeued_element);
LOCK_UNLOCK(&priority_queue->lock);
return return_code;
}
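
For orientation, a caller-side sketch of the renamed priority_queue API, assuming the three-argument initializer shown in the hunk header above; the header name, the element type, and its priority callback are hypothetical.

#include <stdbool.h>
#include <stdint.h>
#include "priority_queue.h" /* assumed header name */

/* Hypothetical element keyed on an absolute deadline */
struct example_job {
        uint64_t absolute_deadline;
};

static uint64_t
example_job_get_priority(void *element)
{
        return ((struct example_job *)element)->absolute_deadline;
}

/* Enqueue two jobs and pop the earliest-deadline one. The asserts above
 * require that this run on a worker rather than the listener thread. */
static void
priority_queue_example(void)
{
        struct priority_queue *priority_queue = priority_queue_initialize(16, true, example_job_get_priority);

        struct example_job first  = { .absolute_deadline = 200 };
        struct example_job second = { .absolute_deadline = 100 };

        int rc = priority_queue_enqueue(priority_queue, &first); /* 0 or -ENOSPC */
        if (rc == 0) rc = priority_queue_enqueue(priority_queue, &second);

        void *earliest = NULL;
        if (rc == 0 && priority_queue_dequeue(priority_queue, &earliest) == 0) {
                /* Min-heap: earliest now points at `second` */
        }
        (void)earliest;

        priority_queue_free(priority_queue);
}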

@ -15,18 +15,20 @@ struct sandbox_state_history {
};
static inline void
sandbox_state_history_init(struct sandbox_state_history *self)
sandbox_state_history_init(struct sandbox_state_history *sandbox_state_history)
{
#ifdef LOG_STATE_CHANGES
memset(self, 0,
memset(sandbox_state_history, 0,
sizeof(struct sandbox_state_history) + SANDBOX_STATE_HISTORY_CAPACITY * sizeof(sandbox_state_t));
#endif
}
static inline void
sandbox_state_history_append(struct sandbox_state_history *self, sandbox_state_t state)
sandbox_state_history_append(struct sandbox_state_history *sandbox_state_history, sandbox_state_t state)
{
#ifdef LOG_STATE_CHANGES
if (likely(self->size < SANDBOX_STATE_HISTORY_CAPACITY)) { self->buffer[self->size++] = state; }
if (likely(sandbox_state_history->size < SANDBOX_STATE_HISTORY_CAPACITY)) {
sandbox_state_history->buffer[sandbox_state_history->size++] = state;
}
#endif
}
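
A brief sketch of the intended call pattern for the renamed history helpers (header name assumed); both calls compile to no-ops unless LOG_STATE_CHANGES is defined.

#include "sandbox_state_history.h" /* assumed header name */

/* Init once when the sandbox is allocated, then append on every state
 * transition; `next_state` comes from the sandbox state machine. */
static inline void
sandbox_state_history_example(struct sandbox_state_history *sandbox_state_history, sandbox_state_t next_state)
{
        sandbox_state_history_init(sandbox_state_history);
        sandbox_state_history_append(sandbox_state_history, next_state);
}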

@ -10,11 +10,11 @@ struct vec_u8 {
};
static inline struct vec_u8 *vec_u8_alloc(void);
static inline int vec_u8_init(struct vec_u8 *self, size_t capacity);
static inline int vec_u8_init(struct vec_u8 *vec_u8, size_t capacity);
static inline struct vec_u8 *vec_u8_new(size_t capacity);
static inline void vec_u8_deinit(struct vec_u8 *self);
static inline void vec_u8_free(struct vec_u8 *self);
static inline void vec_u8_delete(struct vec_u8 *self);
static inline void vec_u8_deinit(struct vec_u8 *vec_u8);
static inline void vec_u8_free(struct vec_u8 *vec_u8);
static inline void vec_u8_delete(struct vec_u8 *vec_u8);
/**
* Allocates an uninitialized vec on the heap
@ -28,22 +28,22 @@ vec_u8_alloc(void)
/**
* Initializes a vec, allocating a backing buffer for the provided capacity
* @param self pointer to an uninitialized vec
* @param vec_u8 pointer to an uninitialized vec
* @param capacity
* @returns 0 on success, -1 on failure
*/
static inline int
vec_u8_init(struct vec_u8 *self, size_t capacity)
vec_u8_init(struct vec_u8 *vec_u8, size_t capacity)
{
if (capacity == 0) {
self->buffer = NULL;
vec_u8->buffer = NULL;
} else {
self->buffer = calloc(capacity, sizeof(uint8_t));
if (self->buffer == NULL) return -1;
vec_u8->buffer = calloc(capacity, sizeof(uint8_t));
if (vec_u8->buffer == NULL) return -1;
}
self->length = 0;
self->capacity = capacity;
vec_u8->length = 0;
vec_u8->capacity = capacity;
return 0;
}
@ -56,36 +56,36 @@ vec_u8_init(struct vec_u8 *self, size_t capacity)
static inline struct vec_u8 *
vec_u8_new(size_t capacity)
{
struct vec_u8 *self = vec_u8_alloc();
if (self == NULL) return self;
struct vec_u8 *vec_u8 = vec_u8_alloc();
if (vec_u8 == NULL) return vec_u8;
int rc = vec_u8_init(self, capacity);
int rc = vec_u8_init(vec_u8, capacity);
if (rc < 0) {
vec_u8_free(self);
vec_u8_free(vec_u8);
return NULL;
}
return self;
return vec_u8;
}
/**
* Deinitialize a vec, clearing out members and releasing the backing buffer
* @param self
* @param vec_u8
*/
static inline void
vec_u8_deinit(struct vec_u8 *self)
vec_u8_deinit(struct vec_u8 *vec_u8)
{
if (self->capacity == 0) {
assert(self->buffer == NULL);
assert(self->length == 0);
if (vec_u8->capacity == 0) {
assert(vec_u8->buffer == NULL);
assert(vec_u8->length == 0);
return;
}
assert(self->buffer != NULL);
free(self->buffer);
self->buffer = NULL;
self->length = 0;
self->capacity = 0;
assert(vec_u8->buffer != NULL);
free(vec_u8->buffer);
vec_u8->buffer = NULL;
vec_u8->length = 0;
vec_u8->capacity = 0;
}
/**
@ -93,21 +93,21 @@ vec_u8_deinit(struct vec_u8 *self)
* Assumes that the vec has already been deinitialized
*/
static inline void
vec_u8_free(struct vec_u8 *self)
vec_u8_free(struct vec_u8 *vec_u8)
{
assert(self->buffer == NULL);
assert(self->length == 0);
assert(self->capacity == 0);
free(self);
assert(vec_u8->buffer == NULL);
assert(vec_u8->length == 0);
assert(vec_u8->capacity == 0);
free(vec_u8);
}
/**
* Deinitializes and frees a vec allocated to the heap
* @param self
* @param vec_u8
*/
static inline void
vec_u8_delete(struct vec_u8 *self)
vec_u8_delete(struct vec_u8 *vec_u8)
{
vec_u8_deinit(self);
vec_u8_free(self);
vec_u8_deinit(vec_u8);
vec_u8_free(vec_u8);
}
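
For orientation, the two lifecycles the renamed vec_u8 helpers support, mirroring how sandbox_allocate_http_buffers() uses them later in this commit (header name assumed).

#include "vec_u8.h" /* assumed header name */

static void
vec_u8_example(void)
{
        /* Embedded in a larger struct: vec_u8_init() pairs with vec_u8_deinit() */
        struct vec_u8 request;
        if (vec_u8_init(&request, 1024) == 0) vec_u8_deinit(&request);

        /* Heap allocated: vec_u8_new() pairs with vec_u8_delete() (deinit + free) */
        struct vec_u8 *response = vec_u8_new(4096);
        if (response != NULL) vec_u8_delete(response);
}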

@ -23,11 +23,11 @@ struct wasm_memory {
};
static INLINE struct wasm_memory *wasm_memory_alloc(void);
static INLINE int wasm_memory_init(struct wasm_memory *self, size_t initial, size_t max);
static INLINE int wasm_memory_init(struct wasm_memory *wasm_memory, size_t initial, size_t max);
static INLINE struct wasm_memory *wasm_memory_new(size_t initial, size_t max);
static INLINE void wasm_memory_deinit(struct wasm_memory *self);
static INLINE void wasm_memory_free(struct wasm_memory *self);
static INLINE void wasm_memory_delete(struct wasm_memory *self);
static INLINE void wasm_memory_deinit(struct wasm_memory *wasm_memory);
static INLINE void wasm_memory_free(struct wasm_memory *wasm_memory);
static INLINE void wasm_memory_delete(struct wasm_memory *wasm_memory);
static INLINE struct wasm_memory *
@ -37,9 +37,9 @@ wasm_memory_alloc(void)
}
static INLINE int
wasm_memory_init(struct wasm_memory *self, size_t initial, size_t max)
wasm_memory_init(struct wasm_memory *wasm_memory, size_t initial, size_t max)
{
assert(self != NULL);
assert(wasm_memory != NULL);
/* We assume WASI modules, which are required to declare and export a linear memory with a non-zero size to
* allow a standard lib to initialize. Technically, a WebAssembly module that exports pure functions may not use
@ -50,20 +50,20 @@ wasm_memory_init(struct wasm_memory *self, size_t initial, size_t max)
assert(max <= (size_t)UINT32_MAX + 1);
/* Allocate buffer of contiguous virtual addresses for full wasm32 linear memory and guard page */
self->buffer = mmap(NULL, WASM_MEMORY_SIZE_TO_ALLOC, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (self->buffer == MAP_FAILED) return -1;
wasm_memory->buffer = mmap(NULL, WASM_MEMORY_SIZE_TO_ALLOC, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (wasm_memory->buffer == MAP_FAILED) return -1;
/* Set the initial bytes to read / write */
int rc = mprotect(self->buffer, initial, PROT_READ | PROT_WRITE);
int rc = mprotect(wasm_memory->buffer, initial, PROT_READ | PROT_WRITE);
if (rc != 0) {
munmap(self->buffer, WASM_MEMORY_SIZE_TO_ALLOC);
munmap(wasm_memory->buffer, WASM_MEMORY_SIZE_TO_ALLOC);
return -1;
}
ps_list_init_d(self);
self->size = initial;
self->capacity = initial;
self->max = max;
ps_list_init_d(wasm_memory);
wasm_memory->size = initial;
wasm_memory->capacity = initial;
wasm_memory->max = max;
return 0;
}
@ -71,70 +71,70 @@ wasm_memory_init(struct wasm_memory *self, size_t initial, size_t max)
static INLINE struct wasm_memory *
wasm_memory_new(size_t initial, size_t max)
{
struct wasm_memory *self = wasm_memory_alloc();
if (self == NULL) return self;
struct wasm_memory *wasm_memory = wasm_memory_alloc();
if (wasm_memory == NULL) return wasm_memory;
int rc = wasm_memory_init(self, initial, max);
int rc = wasm_memory_init(wasm_memory, initial, max);
if (rc < 0) {
assert(0);
wasm_memory_free(self);
wasm_memory_free(wasm_memory);
return NULL;
}
return self;
return wasm_memory;
}
static INLINE void
wasm_memory_deinit(struct wasm_memory *self)
wasm_memory_deinit(struct wasm_memory *wasm_memory)
{
assert(self != NULL);
assert(self->buffer != NULL);
munmap(self->buffer, WASM_MEMORY_SIZE_TO_ALLOC);
self->buffer = NULL;
self->size = 0;
self->capacity = 0;
self->max = 0;
assert(wasm_memory != NULL);
assert(wasm_memory->buffer != NULL);
munmap(wasm_memory->buffer, WASM_MEMORY_SIZE_TO_ALLOC);
wasm_memory->buffer = NULL;
wasm_memory->size = 0;
wasm_memory->capacity = 0;
wasm_memory->max = 0;
}
static INLINE void
wasm_memory_free(struct wasm_memory *self)
wasm_memory_free(struct wasm_memory *wasm_memory)
{
assert(self != NULL);
assert(wasm_memory != NULL);
/* Assume prior deinitialization so we don't leak buffers */
assert(self->buffer == NULL);
assert(wasm_memory->buffer == NULL);
free(self);
free(wasm_memory);
}
static INLINE void
wasm_memory_delete(struct wasm_memory *self)
wasm_memory_delete(struct wasm_memory *wasm_memory)
{
assert(self != NULL);
assert(wasm_memory != NULL);
wasm_memory_deinit(self);
wasm_memory_free(self);
wasm_memory_deinit(wasm_memory);
wasm_memory_free(wasm_memory);
}
static INLINE void
wasm_memory_wipe(struct wasm_memory *self)
wasm_memory_wipe(struct wasm_memory *wasm_memory)
{
memset(self->buffer, 0, self->size);
memset(wasm_memory->buffer, 0, wasm_memory->size);
}
static INLINE void
wasm_memory_reinit(struct wasm_memory *self, size_t initial)
wasm_memory_reinit(struct wasm_memory *wasm_memory, size_t initial)
{
wasm_memory_wipe(self);
self->size = initial;
wasm_memory_wipe(wasm_memory);
wasm_memory->size = initial;
}
static INLINE int
wasm_memory_expand(struct wasm_memory *self, size_t size_to_expand)
wasm_memory_expand(struct wasm_memory *wasm_memory, size_t size_to_expand)
{
size_t target_size = self->size + size_to_expand;
if (unlikely(target_size > self->max)) {
fprintf(stderr, "wasm_memory_expand - Out of Memory!. %lu out of %lu\n", self->size, self->max);
size_t target_size = wasm_memory->size + size_to_expand;
if (unlikely(target_size > wasm_memory->max)) {
fprintf(stderr, "wasm_memory_expand - Out of Memory!. %lu out of %lu\n", wasm_memory->size, wasm_memory->max);
return -1;
}
@ -143,37 +143,37 @@ wasm_memory_expand(struct wasm_memory *self, size_t size_to_expand)
* size is less than this "high water mark," we just need to update size for accounting purposes. Otherwise, we
* need to actually issue an mprotect syscall. The goal of these optimizations is to reduce mmap and demand
* paging overhead for repeated instantiations of a WebAssembly module. */
if (target_size > self->capacity) {
int rc = mprotect(self->buffer, target_size, PROT_READ | PROT_WRITE);
if (target_size > wasm_memory->capacity) {
int rc = mprotect(wasm_memory->buffer, target_size, PROT_READ | PROT_WRITE);
if (rc != 0) {
perror("wasm_memory_expand mprotect");
return -1;
}
self->capacity = target_size;
wasm_memory->capacity = target_size;
}
self->size = target_size;
wasm_memory->size = target_size;
return 0;
}
static INLINE void
wasm_memory_set_size(struct wasm_memory *self, size_t size)
wasm_memory_set_size(struct wasm_memory *wasm_memory, size_t size)
{
self->size = size;
wasm_memory->size = size;
}
static INLINE size_t
wasm_memory_get_size(struct wasm_memory *self)
wasm_memory_get_size(struct wasm_memory *wasm_memory)
{
return self->size;
return wasm_memory->size;
}
static INLINE void
wasm_memory_initialize_region(struct wasm_memory *self, uint32_t offset, uint32_t region_size, uint8_t region[])
wasm_memory_initialize_region(struct wasm_memory *wasm_memory, uint32_t offset, uint32_t region_size, uint8_t region[])
{
assert((size_t)offset + region_size <= self->size);
memcpy(&self->buffer[offset], region, region_size);
assert((size_t)offset + region_size <= wasm_memory->size);
memcpy(&wasm_memory->buffer[offset], region, region_size);
}
/* NOTE: These wasm_memory functions require pointer dereferencing. For this reason, they are not used directly by wasm32
@ -186,10 +186,10 @@ wasm_memory_initialize_region(struct wasm_memory *self, uint32_t offset, uint32_
* @return void pointer to something in WebAssembly linear memory
*/
static INLINE void *
wasm_memory_get_ptr_void(struct wasm_memory *self, uint32_t offset, uint32_t size)
wasm_memory_get_ptr_void(struct wasm_memory *wasm_memory, uint32_t offset, uint32_t size)
{
assert(offset + size <= self->size);
return (void *)&self->buffer[offset];
assert(offset + size <= wasm_memory->size);
return (void *)&wasm_memory->buffer[offset];
}
/**
@ -198,10 +198,10 @@ wasm_memory_get_ptr_void(struct wasm_memory *self, uint32_t offset, uint32_t siz
* @return char at the offset
*/
static INLINE char
wasm_memory_get_char(struct wasm_memory *self, uint32_t offset)
wasm_memory_get_char(struct wasm_memory *wasm_memory, uint32_t offset)
{
assert(offset + sizeof(char) <= self->size);
return *(char *)&self->buffer[offset];
assert(offset + sizeof(char) <= wasm_memory->size);
return *(char *)&wasm_memory->buffer[offset];
}
/**
@ -210,10 +210,10 @@ wasm_memory_get_char(struct wasm_memory *self, uint32_t offset)
* @return float at the offset
*/
static INLINE float
wasm_memory_get_f32(struct wasm_memory *self, uint32_t offset)
wasm_memory_get_f32(struct wasm_memory *wasm_memory, uint32_t offset)
{
assert(offset + sizeof(float) <= self->size);
return *(float *)&self->buffer[offset];
assert(offset + sizeof(float) <= wasm_memory->size);
return *(float *)&wasm_memory->buffer[offset];
}
/**
@ -222,10 +222,10 @@ wasm_memory_get_f32(struct wasm_memory *self, uint32_t offset)
* @return double at the offset
*/
static INLINE double
wasm_memory_get_f64(struct wasm_memory *self, uint32_t offset)
wasm_memory_get_f64(struct wasm_memory *wasm_memory, uint32_t offset)
{
assert(offset + sizeof(double) <= self->size);
return *(double *)&self->buffer[offset];
assert(offset + sizeof(double) <= wasm_memory->size);
return *(double *)&wasm_memory->buffer[offset];
}
/**
@ -234,10 +234,10 @@ wasm_memory_get_f64(struct wasm_memory *self, uint32_t offset)
* @return int8_t at the offset
*/
static INLINE int8_t
wasm_memory_get_i8(struct wasm_memory *self, uint32_t offset)
wasm_memory_get_i8(struct wasm_memory *wasm_memory, uint32_t offset)
{
assert(offset + sizeof(int8_t) <= self->size);
return *(int8_t *)&self->buffer[offset];
assert(offset + sizeof(int8_t) <= wasm_memory->size);
return *(int8_t *)&wasm_memory->buffer[offset];
}
/**
@ -246,10 +246,10 @@ wasm_memory_get_i8(struct wasm_memory *self, uint32_t offset)
* @return int16_t at the offset
*/
static INLINE int16_t
wasm_memory_get_i16(struct wasm_memory *self, uint32_t offset)
wasm_memory_get_i16(struct wasm_memory *wasm_memory, uint32_t offset)
{
assert(offset + sizeof(int16_t) <= self->size);
return *(int16_t *)&self->buffer[offset];
assert(offset + sizeof(int16_t) <= wasm_memory->size);
return *(int16_t *)&wasm_memory->buffer[offset];
}
/**
@ -258,10 +258,10 @@ wasm_memory_get_i16(struct wasm_memory *self, uint32_t offset)
* @return int32_t at the offset
*/
static INLINE int32_t
wasm_memory_get_i32(struct wasm_memory *self, uint32_t offset)
wasm_memory_get_i32(struct wasm_memory *wasm_memory, uint32_t offset)
{
assert(offset + sizeof(int32_t) <= self->size);
return *(int32_t *)&self->buffer[offset];
assert(offset + sizeof(int32_t) <= wasm_memory->size);
return *(int32_t *)&wasm_memory->buffer[offset];
}
/**
@ -270,16 +270,16 @@ wasm_memory_get_i32(struct wasm_memory *self, uint32_t offset)
* @return int32_t at the offset
*/
static INLINE int64_t
wasm_memory_get_i64(struct wasm_memory *self, uint32_t offset)
wasm_memory_get_i64(struct wasm_memory *wasm_memory, uint32_t offset)
{
assert(offset + sizeof(int64_t) <= self->size);
return *(int64_t *)&self->buffer[offset];
assert(offset + sizeof(int64_t) <= wasm_memory->size);
return *(int64_t *)&wasm_memory->buffer[offset];
}
static INLINE uint32_t
wasm_memory_get_page_count(struct wasm_memory *self)
wasm_memory_get_page_count(struct wasm_memory *wasm_memory)
{
return (uint32_t)(self->size / WASM_PAGE_SIZE);
return (uint32_t)(wasm_memory->size / WASM_PAGE_SIZE);
}
/**
@ -289,12 +289,12 @@ wasm_memory_get_page_count(struct wasm_memory *self)
* @return pointer to the string or NULL if size is reached without finding a null-terminator
*/
static INLINE char *
wasm_memory_get_string(struct wasm_memory *self, uint32_t offset, uint32_t size)
wasm_memory_get_string(struct wasm_memory *wasm_memory, uint32_t offset, uint32_t size)
{
assert(offset + (sizeof(char) * size) <= self->size);
assert(offset + (sizeof(char) * size) <= wasm_memory->size);
if (strnlen((const char *)&self->buffer[offset], size) < size) {
return (char *)&self->buffer[offset];
if (strnlen((const char *)&wasm_memory->buffer[offset], size) < size) {
return (char *)&wasm_memory->buffer[offset];
} else {
return NULL;
}
@ -306,10 +306,10 @@ wasm_memory_get_string(struct wasm_memory *self, uint32_t offset, uint32_t size)
* @return float at the offset
*/
static INLINE void
wasm_memory_set_f32(struct wasm_memory *self, uint32_t offset, float value)
wasm_memory_set_f32(struct wasm_memory *wasm_memory, uint32_t offset, float value)
{
assert(offset + sizeof(float) <= self->size);
*(float *)&self->buffer[offset] = value;
assert(offset + sizeof(float) <= wasm_memory->size);
*(float *)&wasm_memory->buffer[offset] = value;
}
/**
@ -318,10 +318,10 @@ wasm_memory_set_f32(struct wasm_memory *self, uint32_t offset, float value)
* @return double at the offset
*/
static INLINE void
wasm_memory_set_f64(struct wasm_memory *self, uint32_t offset, double value)
wasm_memory_set_f64(struct wasm_memory *wasm_memory, uint32_t offset, double value)
{
assert(offset + sizeof(double) <= self->size);
*(double *)&self->buffer[offset] = value;
assert(offset + sizeof(double) <= wasm_memory->size);
*(double *)&wasm_memory->buffer[offset] = value;
}
/**
@ -330,10 +330,10 @@ wasm_memory_set_f64(struct wasm_memory *self, uint32_t offset, double value)
* @return int8_t at the offset
*/
static INLINE void
wasm_memory_set_i8(struct wasm_memory *self, uint32_t offset, int8_t value)
wasm_memory_set_i8(struct wasm_memory *wasm_memory, uint32_t offset, int8_t value)
{
assert(offset + sizeof(int8_t) <= self->size);
*(int8_t *)&self->buffer[offset] = value;
assert(offset + sizeof(int8_t) <= wasm_memory->size);
*(int8_t *)&wasm_memory->buffer[offset] = value;
}
/**
@ -342,10 +342,10 @@ wasm_memory_set_i8(struct wasm_memory *self, uint32_t offset, int8_t value)
* @return int16_t at the offset
*/
static INLINE void
wasm_memory_set_i16(struct wasm_memory *self, uint32_t offset, int16_t value)
wasm_memory_set_i16(struct wasm_memory *wasm_memory, uint32_t offset, int16_t value)
{
assert(offset + sizeof(int16_t) <= self->size);
*(int16_t *)&self->buffer[offset] = value;
assert(offset + sizeof(int16_t) <= wasm_memory->size);
*(int16_t *)&wasm_memory->buffer[offset] = value;
}
/**
@ -354,10 +354,10 @@ wasm_memory_set_i16(struct wasm_memory *self, uint32_t offset, int16_t value)
* @return int32_t at the offset
*/
static INLINE void
wasm_memory_set_i32(struct wasm_memory *self, uint32_t offset, int32_t value)
wasm_memory_set_i32(struct wasm_memory *wasm_memory, uint32_t offset, int32_t value)
{
assert(offset + sizeof(int32_t) <= self->size);
*(int32_t *)&self->buffer[offset] = value;
assert(offset + sizeof(int32_t) <= wasm_memory->size);
*(int32_t *)&wasm_memory->buffer[offset] = value;
}
/**
@ -366,8 +366,8 @@ wasm_memory_set_i32(struct wasm_memory *self, uint32_t offset, int32_t value)
* @return int64_t at the offset
*/
static INLINE void
wasm_memory_set_i64(struct wasm_memory *self, uint64_t offset, int64_t value)
wasm_memory_set_i64(struct wasm_memory *wasm_memory, uint64_t offset, int64_t value)
{
assert(offset + sizeof(int64_t) <= self->size);
*(int64_t *)&self->buffer[offset] = value;
assert(offset + sizeof(int64_t) <= wasm_memory->size);
*(int64_t *)&wasm_memory->buffer[offset] = value;
}
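
For orientation, a minimal sketch exercising the renamed wasm_memory accessors (header name and sizes are assumptions).

#include <stdint.h>
#include "wasm_memory.h" /* assumed header name */

static void
wasm_memory_example(void)
{
        /* One initial wasm page, allowed to grow to ten */
        struct wasm_memory *wasm_memory = wasm_memory_new(WASM_PAGE_SIZE, 10 * WASM_PAGE_SIZE);
        if (wasm_memory == NULL) return;

        wasm_memory_set_i32(wasm_memory, 0, 42);
        int32_t value = wasm_memory_get_i32(wasm_memory, 0); /* 42 */
        (void)value;

        /* Growing past the capacity high-water mark takes the mprotect path above */
        if (wasm_memory_expand(wasm_memory, WASM_PAGE_SIZE) == 0) {
                uint32_t pages = wasm_memory_get_page_count(wasm_memory); /* 2 */
                (void)pages;
        }

        wasm_memory_delete(wasm_memory); /* deinit (munmap) + free */
}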

@ -29,93 +29,93 @@ wasm_stack_allocate(void)
* @returns 0 on success, -1 on error
*/
static inline int
wasm_stack_init(struct wasm_stack *self, size_t capacity)
wasm_stack_init(struct wasm_stack *wasm_stack, size_t capacity)
{
assert(self);
assert(wasm_stack);
int rc = 0;
self->buffer = (uint8_t *)mmap(NULL, /* guard page */ PAGE_SIZE + capacity, PROT_NONE,
wasm_stack->buffer = (uint8_t *)mmap(NULL, /* guard page */ PAGE_SIZE + capacity, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (unlikely(self->buffer == MAP_FAILED)) {
if (unlikely(wasm_stack->buffer == MAP_FAILED)) {
perror("sandbox allocate stack");
goto err_stack_allocation_failed;
}
self->low = (uint8_t *)mmap(self->buffer + /* guard page */ PAGE_SIZE, capacity, PROT_READ | PROT_WRITE,
wasm_stack->low = (uint8_t *)mmap(wasm_stack->buffer + /* guard page */ PAGE_SIZE, capacity, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
if (unlikely(self->low == MAP_FAILED)) {
if (unlikely(wasm_stack->low == MAP_FAILED)) {
perror("sandbox set stack read/write");
goto err_stack_prot_failed;
}
ps_list_init_d(self);
self->capacity = capacity;
self->high = self->low + capacity;
ps_list_init_d(wasm_stack);
wasm_stack->capacity = capacity;
wasm_stack->high = wasm_stack->low + capacity;
rc = 0;
done:
return rc;
err_stack_prot_failed:
rc = munmap(self->buffer, PAGE_SIZE + capacity);
rc = munmap(wasm_stack->buffer, PAGE_SIZE + capacity);
if (rc == -1) perror("munmap");
err_stack_allocation_failed:
self->buffer = NULL;
wasm_stack->buffer = NULL;
rc = -1;
goto done;
}
static INLINE void
wasm_stack_free(struct wasm_stack *self)
wasm_stack_free(struct wasm_stack *wasm_stack)
{
free(self);
free(wasm_stack);
}
static struct wasm_stack *
wasm_stack_new(size_t capacity)
{
struct wasm_stack *self = wasm_stack_allocate();
int rc = wasm_stack_init(self, capacity);
struct wasm_stack *wasm_stack = wasm_stack_allocate();
int rc = wasm_stack_init(wasm_stack, capacity);
if (rc < 0) {
wasm_stack_free(self);
wasm_stack_free(wasm_stack);
return NULL;
}
return self;
return wasm_stack;
}
static inline void
wasm_stack_deinit(struct wasm_stack *self)
wasm_stack_deinit(struct wasm_stack *wasm_stack)
{
assert(self != NULL);
assert(self->buffer != NULL);
assert(wasm_stack != NULL);
assert(wasm_stack->buffer != NULL);
/* The stack start is the bottom of the usable stack, but we allocated a guard page below this */
munmap(self->buffer, self->capacity + PAGE_SIZE);
self->buffer = NULL;
self->high = NULL;
self->low = NULL;
munmap(wasm_stack->buffer, wasm_stack->capacity + PAGE_SIZE);
wasm_stack->buffer = NULL;
wasm_stack->high = NULL;
wasm_stack->low = NULL;
}
static inline void
wasm_stack_delete(struct wasm_stack *self)
wasm_stack_delete(struct wasm_stack *wasm_stack)
{
assert(self != NULL);
assert(self->buffer != NULL);
wasm_stack_deinit(self);
wasm_stack_free(self);
assert(wasm_stack != NULL);
assert(wasm_stack->buffer != NULL);
wasm_stack_deinit(wasm_stack);
wasm_stack_free(wasm_stack);
}
static inline void
wasm_stack_reinit(struct wasm_stack *self)
wasm_stack_reinit(struct wasm_stack *wasm_stack)
{
assert(self != NULL);
assert(self->buffer != NULL);
assert(wasm_stack != NULL);
assert(wasm_stack->buffer != NULL);
self->low = self->buffer + /* guard page */ PAGE_SIZE;
wasm_stack->low = wasm_stack->buffer + /* guard page */ PAGE_SIZE;
memset(self->low, 0, self->capacity);
ps_list_init_d(self);
self->high = self->low + self->capacity;
memset(wasm_stack->low, 0, wasm_stack->capacity);
ps_list_init_d(wasm_stack);
wasm_stack->high = wasm_stack->low + wasm_stack->capacity;
}
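
For orientation, the renamed wasm_stack lifecycle as driven by module_allocate_stack()/module_free_stack() earlier in this commit (header name and capacity are assumptions).

#include "wasm_stack.h" /* assumed header name */

static void
wasm_stack_example(void)
{
        /* 128 pages of usable stack plus the guard page mapped by wasm_stack_init() */
        struct wasm_stack *wasm_stack = wasm_stack_new(PAGE_SIZE * 128);
        if (wasm_stack == NULL) return;

        /* Reuse between sandboxes: zero the usable region and reset low/high */
        wasm_stack_reinit(wasm_stack);

        wasm_stack_delete(wasm_stack); /* deinit (munmap) + free */
}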

@ -21,11 +21,11 @@ struct wasm_table {
};
static INLINE struct wasm_table *wasm_table_alloc(void);
static INLINE int wasm_table_init(struct wasm_table *self, size_t capacity);
static INLINE int wasm_table_init(struct wasm_table *wasm_table, size_t capacity);
static INLINE struct wasm_table *wasm_table_new(size_t capacity);
static INLINE void wasm_table_deinit(struct wasm_table *self);
static INLINE void wasm_table_free(struct wasm_table *self);
static INLINE void wasm_table_delete(struct wasm_table *self);
static INLINE void wasm_table_deinit(struct wasm_table *wasm_table);
static INLINE void wasm_table_free(struct wasm_table *wasm_table);
static INLINE void wasm_table_delete(struct wasm_table *wasm_table);
static INLINE struct wasm_table *
wasm_table_alloc(void)
@ -34,17 +34,17 @@ wasm_table_alloc(void)
}
static INLINE int
wasm_table_init(struct wasm_table *self, size_t capacity)
wasm_table_init(struct wasm_table *wasm_table, size_t capacity)
{
assert(self != NULL);
assert(wasm_table != NULL);
if (capacity > 0) {
self->buffer = calloc(capacity, sizeof(struct wasm_table_entry));
if (self->buffer == NULL) return -1;
wasm_table->buffer = calloc(capacity, sizeof(struct wasm_table_entry));
if (wasm_table->buffer == NULL) return -1;
}
self->capacity = capacity;
self->length = 0;
wasm_table->capacity = capacity;
wasm_table->length = 0;
return 0;
}
@ -52,50 +52,50 @@ wasm_table_init(struct wasm_table *self, size_t capacity)
static INLINE struct wasm_table *
wasm_table_new(size_t capacity)
{
struct wasm_table *self = wasm_table_alloc();
if (self == NULL) return NULL;
struct wasm_table *wasm_table = wasm_table_alloc();
if (wasm_table == NULL) return NULL;
int rc = wasm_table_init(self, capacity);
int rc = wasm_table_init(wasm_table, capacity);
if (rc < 0) {
wasm_table_free(self);
wasm_table_free(wasm_table);
return NULL;
}
return self;
return wasm_table;
}
static INLINE void
wasm_table_deinit(struct wasm_table *self)
wasm_table_deinit(struct wasm_table *wasm_table)
{
assert(self != NULL);
assert(wasm_table != NULL);
if (self->capacity == 0) {
assert(self->buffer == NULL);
assert(self->length == 0);
if (wasm_table->capacity == 0) {
assert(wasm_table->buffer == NULL);
assert(wasm_table->length == 0);
return;
}
assert(self->buffer != NULL);
free(self->buffer);
self->buffer = NULL;
self->length = 0;
self->capacity = 0;
assert(wasm_table->buffer != NULL);
free(wasm_table->buffer);
wasm_table->buffer = NULL;
wasm_table->length = 0;
wasm_table->capacity = 0;
}
static INLINE void
wasm_table_free(struct wasm_table *self)
wasm_table_free(struct wasm_table *wasm_table)
{
assert(self != NULL);
free(self);
assert(wasm_table != NULL);
free(wasm_table);
}
static INLINE void *
wasm_table_get(struct wasm_table *self, uint32_t idx, uint32_t type_id)
wasm_table_get(struct wasm_table *wasm_table, uint32_t idx, uint32_t type_id)
{
assert(self != NULL);
assert(idx < self->capacity);
assert(wasm_table != NULL);
assert(idx < wasm_table->capacity);
struct wasm_table_entry f = self->buffer[idx];
struct wasm_table_entry f = wasm_table->buffer[idx];
// FIXME: Commented out function type check because of gocr
// assert(f.type_id == type_id);
@ -105,14 +105,14 @@ wasm_table_get(struct wasm_table *self, uint32_t idx, uint32_t type_id)
}
static INLINE void
wasm_table_set(struct wasm_table *self, uint32_t idx, uint32_t type_id, char *pointer)
wasm_table_set(struct wasm_table *wasm_table, uint32_t idx, uint32_t type_id, char *pointer)
{
assert(self != NULL);
assert(idx < self->capacity);
assert(wasm_table != NULL);
assert(idx < wasm_table->capacity);
assert(pointer != NULL);
/* TODO: atomic for multiple concurrent invocations? Issue #97 */
if (self->buffer[idx].type_id == type_id && self->buffer[idx].func_pointer == pointer) return;
if (wasm_table->buffer[idx].type_id == type_id && wasm_table->buffer[idx].func_pointer == pointer) return;
self->buffer[idx] = (struct wasm_table_entry){ .type_id = type_id, .func_pointer = pointer };
wasm_table->buffer[idx] = (struct wasm_table_entry){ .type_id = type_id, .func_pointer = pointer };
}
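
For orientation, a sketch of registering and resolving an indirect-call target with the renamed wasm_table API (header name, index, and type_id are hypothetical).

#include <stdint.h>
#include "wasm_table.h" /* assumed header name */

static void
example_indirect_target(void) {}

static void
wasm_table_example(void)
{
        struct wasm_table *wasm_table = wasm_table_new(8);
        if (wasm_table == NULL) return;

        uint32_t type_id = 0; /* hypothetical function-type index */
        wasm_table_set(wasm_table, 3, type_id, (char *)example_indirect_target);

        void *func_pointer = wasm_table_get(wasm_table, 3, type_id);
        (void)func_pointer;

        wasm_table_deinit(wasm_table);
        wasm_table_free(wasm_table);
}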

@ -3,29 +3,29 @@
/**
* Initializes admissions info and its perf window
* @param self
* @param admissions_info
*/
void
admissions_info_initialize(struct admissions_info *self, int percentile, uint64_t expected_execution,
admissions_info_initialize(struct admissions_info *admissions_info, int percentile, uint64_t expected_execution,
uint64_t relative_deadline)
{
#ifdef ADMISSIONS_CONTROL
assert(relative_deadline > 0);
assert(expected_execution > 0);
self->relative_deadline = relative_deadline;
self->estimate = admissions_control_calculate_estimate(expected_execution, relative_deadline);
debuglog("Initial Estimate: %lu\n", self->estimate);
assert(self != NULL);
admissions_info->relative_deadline = relative_deadline;
admissions_info->estimate = admissions_control_calculate_estimate(expected_execution, relative_deadline);
debuglog("Initial Estimate: %lu\n", admissions_info->estimate);
assert(admissions_info != NULL);
perf_window_initialize(&self->perf_window);
perf_window_initialize(&admissions_info->perf_window);
if (unlikely(percentile < 50 || percentile > 99)) panic("Invalid admissions percentile");
self->percentile = percentile;
admissions_info->percentile = percentile;
self->control_index = PERF_WINDOW_BUFFER_SIZE * percentile / 100;
admissions_info->control_index = PERF_WINDOW_BUFFER_SIZE * percentile / 100;
#ifdef LOG_ADMISSIONS_CONTROL
debuglog("Percentile: %d\n", self->percentile);
debuglog("Control Index: %d\n", self->control_index);
debuglog("Percentile: %d\n", admissions_info->percentile);
debuglog("Control Index: %d\n", admissions_info->control_index);
#endif
#endif
}
@ -33,19 +33,21 @@ admissions_info_initialize(struct admissions_info *self, int percentile, uint64_
/*
* Adds an execution value to the perf window and calculates and caches an updated estimate
* @param self
* @param admissions_info
* @param execution_duration
*/
void
admissions_info_update(struct admissions_info *self, uint64_t execution_duration)
admissions_info_update(struct admissions_info *admissions_info, uint64_t execution_duration)
{
#ifdef ADMISSIONS_CONTROL
struct perf_window *perf_window = &self->perf_window;
struct perf_window *perf_window = &admissions_info->perf_window;
LOCK_LOCK(&self->perf_window.lock);
LOCK_LOCK(&admissions_info->perf_window.lock);
perf_window_add(perf_window, execution_duration);
uint64_t estimated_execution = perf_window_get_percentile(perf_window, self->percentile, self->control_index);
self->estimate = admissions_control_calculate_estimate(estimated_execution, self->relative_deadline);
LOCK_UNLOCK(&self->perf_window.lock);
uint64_t estimated_execution = perf_window_get_percentile(perf_window, admissions_info->percentile,
admissions_info->control_index);
admissions_info->estimate = admissions_control_calculate_estimate(estimated_execution,
admissions_info->relative_deadline);
LOCK_UNLOCK(&admissions_info->perf_window.lock);
#endif
}
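
For orientation, a sketch of how a module might drive the renamed admissions_info API (header name and cycle counts are hypothetical); both calls compile to no-ops unless ADMISSIONS_CONTROL is defined.

#include <stdint.h>
#include "admissions_info.h" /* assumed header name */

static void
admissions_info_example(struct admissions_info *admissions_info)
{
        /* p90 estimate, expected execution of 5,000,000 cycles, relative deadline of 20,000,000 cycles */
        admissions_info_initialize(admissions_info, 90, 5000000, 20000000);

        /* After each completed invocation, feed the measured duration back in */
        uint64_t measured_execution = 4800000; /* hypothetical measurement */
        admissions_info_update(admissions_info, measured_execution);
}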

@ -7,16 +7,20 @@
**************************************************/
void
http_request_print(struct http_request *self)
http_request_print(struct http_request *http_request)
{
printf("Header Count %d\n", self->header_count);
printf("Header Count %d\n", http_request->header_count);
printf("Header Content:\n");
for (int i = 0; i < self->header_count; i++) {
for (int j = 0; j < self->headers[i].key_length; j++) { putchar(self->headers[i].key[j]); }
for (int i = 0; i < http_request->header_count; i++) {
for (int j = 0; j < http_request->headers[i].key_length; j++) {
putchar(http_request->headers[i].key[j]);
}
putchar(':');
for (int j = 0; j < self->headers[i].value_length; j++) { putchar(self->headers[i].value[j]); }
for (int j = 0; j < http_request->headers[i].value_length; j++) {
putchar(http_request->headers[i].value[j]);
}
putchar('\n');
}
printf("Body Length %d\n", self->body_length);
printf("Body Read Length %d\n", self->body_read_length);
printf("Body Length %d\n", http_request->body_length);
printf("Body Read Length %d\n", http_request->body_read_length);
}

@ -67,15 +67,15 @@ sandbox_free_stack(struct sandbox *sandbox)
* @returns 0 on success, -1 on error
*/
static inline int
sandbox_allocate_http_buffers(struct sandbox *self)
sandbox_allocate_http_buffers(struct sandbox *sandbox)
{
int rc;
rc = vec_u8_init(&self->request, self->module->max_request_size);
rc = vec_u8_init(&sandbox->request, sandbox->module->max_request_size);
if (rc < 0) return -1;
rc = vec_u8_init(&self->response, self->module->max_response_size);
rc = vec_u8_init(&sandbox->response, sandbox->module->max_response_size);
if (rc < 0) {
vec_u8_deinit(&self->request);
vec_u8_deinit(&sandbox->request);
return -1;
}
