Merge branch 'master' of github.com:gwsystems/sledge-serverless-framework into cmu-sod

Branch: master
Author: Sean McBride, 3 years ago
Commit: d9d149784d
@@ -59,7 +59,6 @@ BINARY_NAME=sledgert
 # CFLAGS += -DLOG_ADMISSIONS_CONTROL
 # CFLAGS += -DLOG_CONTEXT_SWITCHES
 # CFLAGS += -DLOG_HTTP_PARSER
-# CFLAGS += -DLOG_LOCK_OVERHEAD
 # CFLAGS += -DLOG_TENANT_LOADING
 # CFLAGS += -DLOG_PREEMPTION
 # CFLAGS += -DLOG_SANDBOX_ALLOCATION

@@ -1,11 +0,0 @@
-#pragma once
-
-#include <stdint.h>
-#include <threads.h>
-
-extern thread_local uint64_t generic_thread_lock_duration;
-extern thread_local uint64_t generic_thread_lock_longest;
-extern thread_local uint64_t generic_thread_start_timestamp;
-
-void generic_thread_dump_lock_overhead(void);
-void generic_thread_initialize(void);

@@ -3,7 +3,6 @@
 #include <stdbool.h>
 #include <stdnoreturn.h>

-#include "generic_thread.h"
 #include "http_session.h"
 #include "module.h"

@@ -1,68 +1,81 @@
 #pragma once

 #include <assert.h>
 #include <spinlock/mcs.h>
 #include <stdint.h>

 #include "arch/getcycles.h"
 #include "runtime.h"
-#include "generic_thread.h"
-
-typedef ck_spinlock_mcs_t lock_t;
+
+/* A wrapper that bundles an MCS lock with hold-time bookkeeping */
+struct lock_wrapper {
+	uint64_t          longest_held;
+	uint64_t          total_held;
+	ck_spinlock_mcs_t lock;
+};
+
+/* A node on the linked list */
+struct lock_node {
+	struct ck_spinlock_mcs node;
+	uint64_t               time_locked;
+};
+
+typedef struct lock_wrapper lock_t;
+typedef struct lock_node    lock_node_t;

 /**
- * Initializes a lock of type lock_t
+ * Initializes a lock
  * @param lock - the address of the lock
  */
-#define LOCK_INIT(lock) ck_spinlock_mcs_init((lock))
+static inline void
+lock_init(lock_t *self)
+{
+	self->total_held   = 0;
+	self->longest_held = 0;
+	ck_spinlock_mcs_init(&self->lock);
+}

 /**
  * Checks if a lock is locked
  * @param lock - the address of the lock
  * @returns bool if lock is locked
  */
-#define LOCK_IS_LOCKED(lock) ck_spinlock_mcs_locked((lock))
+static inline bool
+lock_is_locked(lock_t *self)
+{
+	return ck_spinlock_mcs_locked(&self->lock);
+}

 /**
  * Locks a lock, keeping track of overhead
  * @param lock - the address of the lock
- * @param unique_variable_name - a unique prefix to hygienically namespace an associated lock/unlock pair
+ * @param node - node to add to lock
  */
-#define LOCK_LOCK_WITH_BOOKKEEPING(lock, unique_variable_name)                                                     \
-	struct ck_spinlock_mcs _hygiene_##unique_variable_name##_node;                                             \
-	uint64_t _hygiene_##unique_variable_name##_pre = __getcycles();                                            \
-	ck_spinlock_mcs_lock((lock), &(_hygiene_##unique_variable_name##_node));                                   \
-	uint64_t _hygiene_##unique_variable_name##_duration = (__getcycles() - _hygiene_##unique_variable_name##_pre); \
-	if (_hygiene_##unique_variable_name##_duration > generic_thread_lock_longest) {                            \
-		generic_thread_lock_longest = _hygiene_##unique_variable_name##_duration;                          \
-	}                                                                                                          \
-	generic_thread_lock_duration += _hygiene_##unique_variable_name##_duration;
+static inline void
+lock_lock(lock_t *self, lock_node_t *node)
+{
+	assert(node->time_locked == 0);
+
+	node->time_locked = __getcycles();
+	ck_spinlock_mcs_lock(&self->lock, &node->node);
+}

 /**
  * Unlocks a lock
  * @param lock - the address of the lock
- * @param unique_variable_name - a unique prefix to hygienically namespace an associated lock/unlock pair
+ * @param node - node used when calling lock_lock
  */
-#define LOCK_UNLOCK_WITH_BOOKKEEPING(lock, unique_variable_name) \
-	ck_spinlock_mcs_unlock(lock, &(_hygiene_##unique_variable_name##_node));
-
-/**
- * Locks a lock, keeping track of overhead
- * Assumes the availability of DEFAULT as a hygienic prefix for DEFAULT_node and DEFAULT_pre
- *
- * As such, this API can only be used once in a lexical scope.
- *
- * Use LOCK_LOCK_WITH_BOOKKEEPING and LOCK_UNLOCK_WITH_BOOKKEEPING if multiple locks are required
- * @param lock - the address of the lock
- */
-#define LOCK_LOCK(lock) LOCK_LOCK_WITH_BOOKKEEPING(lock, DEFAULT)
-
-/**
- * Unlocks a lock
- * Uses lock node NODE_DEFAULT and timestamp PRE_DEFAULT, so this assumes use of LOCK_LOCK
- * This API can only be used once in a lexical scope. If this isn't true, use LOCK_LOCK_WITH_BOOKKEEPING and
- * LOCK_UNLOCK_WITH_BOOKKEEPING
- * @param lock - the address of the lock
- */
-#define LOCK_UNLOCK(lock) LOCK_UNLOCK_WITH_BOOKKEEPING(lock, DEFAULT)
+static inline void
+lock_unlock(lock_t *self, lock_node_t *node)
+{
+	assert(node->time_locked > 0);
+
+	ck_spinlock_mcs_unlock(&self->lock, &node->node);
+	uint64_t now = __getcycles();
+	assert(node->time_locked < now);
+	uint64_t duration = now - node->time_locked;
+	node->time_locked = 0;
+	if (unlikely(duration > self->longest_held)) { self->longest_held = duration; }
+	self->total_held += duration;
+}
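
In short, the macro pair that required a hygienic name prefix becomes a pair of inline functions, and the caller owns the MCS queue node. A minimal usage sketch under the new API (the lock name and critical section are hypothetical, not part of the commit):

	lock_t my_lock; /* hypothetical; initialize once with lock_init(&my_lock) */

	void
	example_update(void)
	{
		lock_node_t node = {}; /* must be zeroed: lock_lock() asserts time_locked == 0 */
		lock_lock(&my_lock, &node);
		/* ... critical section ... */
		lock_unlock(&my_lock, &node); /* folds the hold time into my_lock.total_held */
	}

Because every call site supplies its own lock_node_t, one lexical scope can now take several locks, which the old LOCK_LOCK/LOCK_UNLOCK pair could not do without falling back to the _WITH_BOOKKEEPING variants.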

@@ -37,7 +37,7 @@ map_init(struct map *restrict map)
 {
 	for (int i = 0; i < MAP_BUCKET_COUNT; i++) {
 		map->buckets[i].head = NULL;
-		LOCK_INIT(&map->buckets[i].lock);
+		lock_init(&map->buckets[i].lock);
 	}
 };
@@ -67,7 +67,8 @@ map_get(struct map *map, uint8_t *key, uint32_t key_len, uint32_t *ret_value_len
 	struct map_bucket *bucket = &map->buckets[hash % MAP_BUCKET_COUNT];

-	LOCK_LOCK(&bucket->lock);
+	lock_node_t node = {};
+	lock_lock(&bucket->lock, &node);
 	for (struct map_node *node = bucket->head; node != NULL; node = node->next) {
 		if (node->hash == hash) {
 			value = node->value;
@@ -79,7 +80,7 @@ map_get(struct map *map, uint8_t *key, uint32_t key_len, uint32_t *ret_value_len
 	if (value == NULL) *ret_value_len = 0;
 DONE:
-	LOCK_UNLOCK(&bucket->lock);
+	lock_unlock(&bucket->lock, &node);
 	return value;
 }
@@ -90,7 +91,8 @@ map_set(struct map *map, uint8_t *key, uint32_t key_len, uint8_t *value, uint32_
 	uint32_t hash = MAP_HASH(key, key_len);
 	struct map_bucket *bucket = &map->buckets[hash % MAP_BUCKET_COUNT];

-	LOCK_LOCK(&bucket->lock);
+	lock_node_t node = {};
+	lock_lock(&bucket->lock, &node);
 	for (struct map_node *node = bucket->head; node != NULL; node = node->next) {
 		if (node->hash == hash) goto DONE;
 	}
@@ -111,7 +113,7 @@ map_set(struct map *map, uint8_t *key, uint32_t key_len, uint8_t *value, uint32_
 	did_set = true;
 DONE:
-	LOCK_UNLOCK(&bucket->lock);
+	lock_unlock(&bucket->lock, &node);
 	return did_set;
 }
@@ -125,7 +127,8 @@ map_delete(struct map *map, uint8_t *key, uint32_t key_len)
 	uint32_t hash = MAP_HASH(key, key_len);
 	struct map_bucket *bucket = &map->buckets[hash % MAP_BUCKET_COUNT];

-	LOCK_LOCK(&bucket->lock);
+	lock_node_t node = {};
+	lock_lock(&bucket->lock, &node);
 	struct map_node *prev = bucket->head;
 	if (prev != NULL && prev->hash == hash) {
@@ -147,7 +150,7 @@ map_delete(struct map *map, uint8_t *key, uint32_t key_len)
 	}
 DONE:
-	LOCK_UNLOCK(&bucket->lock);
+	lock_unlock(&bucket->lock, &node);
 	return did_delete;
 }
@@ -156,7 +159,8 @@ map_upsert(struct map *map, uint8_t *key, uint32_t key_len, uint8_t *value, uint
 {
 	uint32_t hash = MAP_HASH(key, key_len);
 	struct map_bucket *bucket = &map->buckets[hash % MAP_BUCKET_COUNT];

-	LOCK_LOCK(&bucket->lock);
+	lock_node_t node = {};
+	lock_lock(&bucket->lock, &node);
 	for (struct map_node *node = bucket->head; node != NULL; node = node->next) {
 		if (node->hash == hash) {
@@ -187,5 +191,5 @@ map_upsert(struct map *map, uint8_t *key, uint32_t key_len, uint8_t *value, uint
 	bucket->head = new_node;
 DONE:
-	LOCK_UNLOCK(&bucket->lock);
+	lock_unlock(&bucket->lock, &node);
 }
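
Two things are worth noting in these map hunks. First, each for-loop's struct map_node *node shadows the outer lock_node_t node; the shadowing is legal C, and the lock_unlock() after the loop still refers to the outer lock node. Second, contention bookkeeping now lives in each bucket's lock rather than in thread-locals, so per-bucket statistics can be read straight off the wrapper. A hypothetical helper (not in the commit) could report them via the runtime's debuglog:

	/* Hypothetical: report the counters a lock_t accumulates. Both are in
	 * cycles, from the __getcycles() timestamps taken by lock_lock() and
	 * lock_unlock(). The wrapper updates them after releasing the MCS lock,
	 * so a concurrent reader sees approximate values. */
	static inline void
	lock_dump(lock_t *self, const char *name)
	{
		debuglog("%s: total held %lu cycles, longest hold %lu cycles\n",
		         name, self->total_held, self->longest_held);
	}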

@@ -19,7 +19,7 @@ perf_window_initialize(struct perf_window *perf_window)
 {
 	assert(perf_window != NULL);

-	LOCK_INIT(&perf_window->lock);
+	lock_init(&perf_window->lock);
 	perf_window->count = 0;
 	memset(perf_window->by_duration, 0, sizeof(struct execution_node) * PERF_WINDOW_BUFFER_SIZE);
 	memset(perf_window->by_termination, 0, sizeof(uint16_t) * PERF_WINDOW_BUFFER_SIZE);
@@ -36,7 +36,7 @@ perf_window_initialize(struct perf_window *perf_window)
 static inline void
 perf_window_swap(struct perf_window *perf_window, uint16_t first_by_duration_idx, uint16_t second_by_duration_idx)
 {
-	assert(LOCK_IS_LOCKED(&perf_window->lock));
+	assert(lock_is_locked(&perf_window->lock));
 	assert(perf_window != NULL);
 	assert(first_by_duration_idx >= 0 && first_by_duration_idx < PERF_WINDOW_BUFFER_SIZE);
 	assert(second_by_duration_idx >= 0 && second_by_duration_idx < PERF_WINDOW_BUFFER_SIZE);
@@ -81,7 +81,7 @@ perf_window_add(struct perf_window *perf_window, uint64_t value)
 	uint16_t idx_of_oldest;
 	bool     check_up;
-	if (unlikely(!LOCK_IS_LOCKED(&perf_window->lock))) panic("lock not held when calling perf_window_add\n");
+	if (unlikely(!lock_is_locked(&perf_window->lock))) panic("lock not held when calling perf_window_add\n");

 	/* A successful invocation should run for a non-zero amount of time */
 	assert(value > 0);

@@ -4,7 +4,6 @@
 #include <stdbool.h>
 #include <stdlib.h>

-#include "generic_thread.h"
 #include "lock.h"
 #include "ps_list.h"
@@ -26,7 +25,7 @@
 	{                                                               \
 		ps_list_head_init(&self->list);                         \
 		self->use_lock = use_lock;                              \
-		if (use_lock) LOCK_INIT(&self->lock);                   \
+		if (use_lock) lock_init(&self->lock);                   \
 	}                                                               \
 	                                                                \
 	static inline void STRUCT_NAME##_pool_deinit(struct STRUCT_NAME##_pool *self) \
@@ -44,7 +43,7 @@
 	static inline struct STRUCT_NAME *STRUCT_NAME##_pool_remove_nolock(struct STRUCT_NAME##_pool *self) \
 	{                                                               \
 		assert(self != NULL);                                   \
-		assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); \
+		assert(!self->use_lock || lock_is_locked(&self->lock)); \
 		                                                        \
 		struct STRUCT_NAME *obj = NULL;                         \
 		                                                        \
@@ -66,9 +65,10 @@
 		bool is_empty = STRUCT_NAME##_pool_is_empty(self);      \
 		if (is_empty) return obj;                               \
 		                                                        \
-		LOCK_LOCK(&self->lock);                                 \
+		lock_node_t node = {};                                  \
+		lock_lock(&self->lock, &node);                          \
 		obj = STRUCT_NAME##_pool_remove_nolock(self);           \
-		LOCK_UNLOCK(&self->lock);                               \
+		lock_unlock(&self->lock, &node);                        \
 		return obj;                                             \
 	}                                                               \
 	                                                                \
@@ -76,7 +76,7 @@
 	{                                                               \
 		assert(self != NULL);                                   \
 		assert(obj != NULL);                                    \
-		assert(!self->use_lock || LOCK_IS_LOCKED(&self->lock)); \
+		assert(!self->use_lock || lock_is_locked(&self->lock)); \
 		                                                        \
 		ps_list_head_add_d(&self->list, obj);                   \
 	}                                                               \
@@ -87,7 +87,8 @@
 		assert(obj != NULL);                                    \
 		assert(self->use_lock);                                 \
 		                                                        \
-		LOCK_LOCK(&self->lock);                                 \
+		lock_node_t node = {};                                  \
+		lock_lock(&self->lock, &node);                          \
 		STRUCT_NAME##_pool_add_nolock(self, obj);               \
-		LOCK_UNLOCK(&self->lock);                               \
+		lock_unlock(&self->lock, &node);                        \
 	}
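
The pool edits follow the pattern used throughout this commit: a zero-initialized stack node brackets a _nolock operation. A condensed sketch of that pattern (function names hypothetical, not from the commit):

	static inline int
	locked_call(lock_t *lock, int (*op_nolock)(void *), void *arg)
	{
		lock_node_t node = {};
		lock_lock(lock, &node);   /* stamps node->time_locked, then acquires */
		int rc = op_nolock(arg);  /* callee may assert lock_is_locked(lock) */
		lock_unlock(lock, &node); /* releases, then accumulates the hold time */
		return rc;
	}

The _nolock variants keep their lock_is_locked() asserts, so misuse still trips in debug builds.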

@@ -60,7 +60,7 @@ priority_queue_append(struct priority_queue *priority_queue, void *new_item)
 {
 	assert(priority_queue != NULL);
 	assert(new_item != NULL);
-	assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
+	assert(!priority_queue->use_lock || lock_is_locked(&priority_queue->lock));

 	int rc;
@@ -85,7 +85,7 @@ static inline bool
 priority_queue_is_empty(struct priority_queue *priority_queue)
 {
 	assert(priority_queue != NULL);
-	assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
+	assert(!priority_queue->use_lock || lock_is_locked(&priority_queue->lock));

 	return priority_queue->size == 0;
 }
@@ -99,7 +99,7 @@ priority_queue_percolate_up(struct priority_queue *priority_queue)
 {
 	assert(priority_queue != NULL);
 	assert(priority_queue->get_priority_fn != NULL);
-	assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
+	assert(!priority_queue->use_lock || lock_is_locked(&priority_queue->lock));

 	/* If there's only one element, set memoized lookup and early out */
 	if (priority_queue->size == 1) {
@@ -135,7 +135,7 @@ priority_queue_find_smallest_child(struct priority_queue *priority_queue, const
 	assert(priority_queue != NULL);
 	assert(parent_index >= 1 && parent_index <= priority_queue->size);
 	assert(priority_queue->get_priority_fn != NULL);
-	assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
+	assert(!priority_queue->use_lock || lock_is_locked(&priority_queue->lock));

 	int left_child_index  = 2 * parent_index;
 	int right_child_index = 2 * parent_index + 1;
@@ -167,7 +167,7 @@ priority_queue_percolate_down(struct priority_queue *priority_queue, int parent_
 {
 	assert(priority_queue != NULL);
 	assert(priority_queue->get_priority_fn != NULL);
-	assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
+	assert(!priority_queue->use_lock || lock_is_locked(&priority_queue->lock));
 	assert(!listener_thread_is_running());

 	bool update_highest_value = parent_index == 1;
@@ -217,7 +217,7 @@ priority_queue_dequeue_if_earlier_nolock(struct priority_queue *priority_queue,
 	assert(dequeued_element != NULL);
 	assert(priority_queue->get_priority_fn != NULL);
 	assert(!listener_thread_is_running());
-	assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
+	assert(!priority_queue->use_lock || lock_is_locked(&priority_queue->lock));

 	int return_code;
@@ -251,9 +251,10 @@ priority_queue_dequeue_if_earlier(struct priority_queue *priority_queue, void **
 {
 	int return_code;

-	LOCK_LOCK(&priority_queue->lock);
+	lock_node_t node = {};
+	lock_lock(&priority_queue->lock, &node);
 	return_code = priority_queue_dequeue_if_earlier_nolock(priority_queue, dequeued_element, target_deadline);
-	LOCK_UNLOCK(&priority_queue->lock);
+	lock_unlock(&priority_queue->lock, &node);

 	return return_code;
 }
@@ -281,7 +282,7 @@ priority_queue_initialize(size_t capacity, bool use_lock, priority_queue_get_pri
 	priority_queue->get_priority_fn = get_priority_fn;
 	priority_queue->use_lock        = use_lock;

-	if (use_lock) LOCK_INIT(&priority_queue->lock);
+	if (use_lock) lock_init(&priority_queue->lock);

 	return priority_queue;
 }
@@ -332,7 +333,7 @@ static inline int
 priority_queue_length_nolock(struct priority_queue *priority_queue)
 {
 	assert(priority_queue != NULL);
-	assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
+	assert(!priority_queue->use_lock || lock_is_locked(&priority_queue->lock));

 	return priority_queue->size;
 }
@@ -344,9 +345,10 @@ priority_queue_length_nolock(struct priority_queue *priority_queue)
 static inline int
 priority_queue_length(struct priority_queue *priority_queue)
 {
-	LOCK_LOCK(&priority_queue->lock);
+	lock_node_t node = {};
+	lock_lock(&priority_queue->lock, &node);
 	int size = priority_queue_length_nolock(priority_queue);
-	LOCK_UNLOCK(&priority_queue->lock);
+	lock_unlock(&priority_queue->lock, &node);
 	return size;
 }
@@ -360,7 +362,7 @@ priority_queue_enqueue_nolock(struct priority_queue *priority_queue, void *value
 {
 	assert(priority_queue != NULL);
 	assert(value != NULL);
-	assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
+	assert(!priority_queue->use_lock || lock_is_locked(&priority_queue->lock));

 	int rc;
@@ -386,9 +388,10 @@ priority_queue_enqueue(struct priority_queue *priority_queue, void *value)
 {
 	int rc;

-	LOCK_LOCK(&priority_queue->lock);
+	lock_node_t node = {};
+	lock_lock(&priority_queue->lock, &node);
 	rc = priority_queue_enqueue_nolock(priority_queue, value);
-	LOCK_UNLOCK(&priority_queue->lock);
+	lock_unlock(&priority_queue->lock, &node);

 	return rc;
 }
@@ -403,7 +406,7 @@ priority_queue_delete_nolock(struct priority_queue *priority_queue, void *value)
 {
 	assert(priority_queue != NULL);
 	assert(value != NULL);
-	assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
+	assert(!priority_queue->use_lock || lock_is_locked(&priority_queue->lock));

 	for (int i = 1; i <= priority_queue->size; i++) {
 		if (priority_queue->items[i] == value) {
@@ -427,9 +430,10 @@ priority_queue_delete(struct priority_queue *priority_queue, void *value)
 {
 	int rc;

-	LOCK_LOCK(&priority_queue->lock);
+	lock_node_t node = {};
+	lock_lock(&priority_queue->lock, &node);
 	rc = priority_queue_delete_nolock(priority_queue, value);
-	LOCK_UNLOCK(&priority_queue->lock);
+	lock_unlock(&priority_queue->lock, &node);
 	return rc;
 }
@@ -468,7 +472,7 @@ priority_queue_top_nolock(struct priority_queue *priority_queue, void **dequeued
 	assert(priority_queue != NULL);
 	assert(dequeued_element != NULL);
 	assert(priority_queue->get_priority_fn != NULL);
-	assert(!priority_queue->use_lock || LOCK_IS_LOCKED(&priority_queue->lock));
+	assert(!priority_queue->use_lock || lock_is_locked(&priority_queue->lock));

 	int return_code;
@@ -495,9 +499,10 @@ priority_queue_top(struct priority_queue *priority_queue, void **dequeued_elemen
 {
 	int return_code;

-	LOCK_LOCK(&priority_queue->lock);
+	lock_node_t node = {};
+	lock_lock(&priority_queue->lock, &node);
 	return_code = priority_queue_top_nolock(priority_queue, dequeued_element);
-	LOCK_UNLOCK(&priority_queue->lock);
+	lock_unlock(&priority_queue->lock, &node);

 	return return_code;
 }

@@ -2,7 +2,6 @@
 #include <threads.h>

-#include "generic_thread.h"
 #include "runtime.h"

 extern thread_local struct arch_context worker_thread_base_context;

@@ -42,12 +42,13 @@ admissions_info_update(struct admissions_info *admissions_info, uint64_t executi
 #ifdef ADMISSIONS_CONTROL
 	struct perf_window *perf_window = &admissions_info->perf_window;

-	LOCK_LOCK(&admissions_info->perf_window.lock);
+	lock_node_t node = {};
+	lock_lock(&admissions_info->perf_window.lock, &node);
 	perf_window_add(perf_window, execution_duration);
 	uint64_t estimated_execution = perf_window_get_percentile(perf_window, admissions_info->percentile,
 	                                                          admissions_info->control_index);
 	admissions_info->estimate = admissions_control_calculate_estimate(estimated_execution,
 	                                                                  admissions_info->relative_deadline);
-	LOCK_UNLOCK(&admissions_info->perf_window.lock);
+	lock_unlock(&admissions_info->perf_window.lock, &node);
 #endif
 }

@@ -28,8 +28,6 @@ current_sandbox_sleep()
 	struct sandbox *sleeping_sandbox = current_sandbox_get();
 	assert(sleeping_sandbox != NULL);

-	generic_thread_dump_lock_overhead();
-
 	switch (sleeping_sandbox->state) {
 	case SANDBOX_RUNNING_SYS: {
 		sandbox_sleep(sleeping_sandbox);
@@ -54,8 +52,6 @@ current_sandbox_exit()
 	struct sandbox *exiting_sandbox = current_sandbox_get();
 	assert(exiting_sandbox != NULL);

-	generic_thread_dump_lock_overhead();
-
 	switch (exiting_sandbox->state) {
 	case SANDBOX_RETURNED:
 		sandbox_exit_success(exiting_sandbox);
@@ -107,7 +103,6 @@ current_sandbox_wasm_trap_handler(int trapno)
 	debuglog("%s", error_message);
 	worker_thread_epoll_remove_sandbox(sandbox);
-	generic_thread_dump_lock_overhead();
 	current_sandbox_exit();
 	assert(0);
 }
@@ -155,7 +150,6 @@ current_sandbox_init()
 err:
 	debuglog("%s", error_message);
 	worker_thread_epoll_remove_sandbox(sandbox);
-	generic_thread_dump_lock_overhead();
 	current_sandbox_exit();
 	return NULL;
 }
@@ -179,7 +173,6 @@ done:
 	sandbox_set_as_returned(sandbox, SANDBOX_RUNNING_SYS);

 	/* Cleanup connection and exit sandbox */
-	generic_thread_dump_lock_overhead();
 	current_sandbox_exit();
 	assert(0);
 err:

@@ -1,39 +0,0 @@
-#include <stdint.h>
-#include <threads.h>
-
-#include "arch/getcycles.h"
-#include "debuglog.h"
-
-extern uint32_t runtime_processor_speed_MHz;
-extern uint32_t runtime_quantum_us;
-
-/* Implemented by listener and workers */
-
-thread_local uint64_t generic_thread_lock_duration   = 0;
-thread_local uint64_t generic_thread_lock_longest    = 0;
-thread_local uint64_t generic_thread_start_timestamp = 0;
-
-void
-generic_thread_initialize()
-{
-	generic_thread_start_timestamp = __getcycles();
-	generic_thread_lock_longest    = 0;
-	generic_thread_lock_duration   = 0;
-}
-
-/**
- * Reports lock contention
- */
-void
-generic_thread_dump_lock_overhead()
-{
-#ifndef NDEBUG
-#ifdef LOG_LOCK_OVERHEAD
-	uint64_t duration = __getcycles() - generic_thread_start_timestamp;
-	debuglog("Locks consumed %lu / %lu cycles, or %f%%\n", generic_thread_lock_duration, duration,
-	         (double)generic_thread_lock_duration / duration * 100);
-	debuglog("Longest Held Lock was %lu cycles, or %f quantums\n", generic_thread_lock_longest,
-	         (double)generic_thread_lock_longest / ((uint64_t)runtime_processor_speed_MHz * runtime_quantum_us));
-#endif
-#endif
-}
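
For reference, the removed report converted cycles to scheduler quantums by dividing by cycles-per-quantum: with illustrative values (not from the commit) of runtime_processor_speed_MHz = 2500 and runtime_quantum_us = 5000, one quantum is 2500 cycles/µs × 5000 µs = 12,500,000 cycles, so a longest hold of 25,000,000 cycles would have printed as 2.0 quantums. Equivalent data remains available after this commit, but per lock rather than per thread, via the total_held and longest_held fields of lock_t.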

@@ -60,7 +60,8 @@ global_request_scheduler_mtds_add(struct sandbox *sandbox)
 	struct tenant_global_request_queue *tgrq = sandbox->tenant->tgrq_requests;

-	LOCK_LOCK(&global_lock);
+	lock_node_t node = {};
+	lock_lock(&global_lock, &node);

 	struct priority_queue *destination_queue = global_request_scheduler_mtds_default;
 	if (sandbox->tenant->tgrq_requests->mt_class == MT_GUARANTEED) {
@@ -84,7 +85,7 @@ global_request_scheduler_mtds_add(struct sandbox *sandbox)
 		// debuglog("Added the TGRQ back to the Global runqueue - %s to Heapify", QUEUE_NAME);
 	}

-	LOCK_UNLOCK(&global_lock);
+	lock_unlock(&global_lock, &node);
 	return sandbox;
 }
@@ -124,9 +125,9 @@ global_request_scheduler_mtds_remove_with_mt_class(struct sandbox **removed_sand
                                                    enum MULTI_TENANCY_CLASS target_mt_class)
 {
 	int rc = -ENOENT;
-	;
-	LOCK_LOCK(&global_lock);
+	lock_node_t node = {};
+	lock_lock(&global_lock, &node);

 	/* Avoid unnessary locks when the target_deadline is tighter than the head of the Global runqueue */
 	uint64_t global_guaranteed_deadline = priority_queue_peek(global_request_scheduler_mtds_guaranteed);
@@ -180,7 +181,7 @@ global_request_scheduler_mtds_remove_with_mt_class(struct sandbox **removed_sand
 	}

 done:
-	LOCK_UNLOCK(&global_lock);
+	lock_unlock(&global_lock, &node);
 	return rc;
 }
@@ -227,7 +228,7 @@ global_request_scheduler_mtds_initialize()
 	global_tenant_timeout_queue = priority_queue_initialize(RUNTIME_MAX_TENANT_COUNT, false,
 	                                                        tenant_timeout_get_priority);

-	LOCK_INIT(&global_lock);
+	lock_init(&global_lock);

 	struct global_request_scheduler_config config = {
 		.add_fn = global_request_scheduler_mtds_add,
@@ -265,7 +266,8 @@ global_request_scheduler_mtds_promote_lock(struct tenant_global_request_queue *t
 	assert(tgrq != NULL);
 	// assert(priority_queue_length_nolock(tgrq->sandbox_requests) == 0);

-	LOCK_LOCK(&global_lock);
+	lock_node_t node = {};
+	lock_lock(&global_lock, &node);

 	if (tgrq->mt_class == MT_GUARANTEED) goto done;
 	if (priority_queue_length_nolock(tgrq->sandbox_requests) == 0) goto done;
@@ -283,7 +285,7 @@ global_request_scheduler_mtds_promote_lock(struct tenant_global_request_queue *t
 	if (rc == -ENOSPC) panic("Global Guaranteed queue is full!\n");

 done:
-	LOCK_UNLOCK(&global_lock);
+	lock_unlock(&global_lock, &node);
 }

 /*

@@ -3,7 +3,6 @@
 #include "arch/getcycles.h"
 #include "global_request_scheduler.h"

-#include "generic_thread.h"
 #include "listener_thread.h"
 #include "module.h"
 #include "runtime.h"
@@ -252,7 +251,8 @@ on_client_request_received(struct http_session *session)
 static void
 on_client_response_header_sending(struct http_session *session)
 {
-	assert(session->state == HTTP_SESSION_SEND_RESPONSE_HEADER_BLOCKED);
+	assert(session->state == HTTP_SESSION_EXECUTION_COMPLETE);
+	session->state = HTTP_SESSION_SENDING_RESPONSE_HEADER;

 	int rc = http_session_send_response_header(session, (void_star_cb)listener_thread_register_http_session);
 	if (likely(rc == 0)) {
@@ -371,8 +371,6 @@ listener_thread_main(void *dummy)
 {
 	struct epoll_event epoll_events[RUNTIME_MAX_EPOLL_EVENTS];

-	generic_thread_initialize();
-
 	/* Set my priority */
 	// runtime_set_pthread_prio(pthread_self(), 2);
 	pthread_setschedprio(pthread_self(), -20);
@@ -399,7 +397,6 @@ listener_thread_main(void *dummy)
 				on_client_socket_epoll_event(&epoll_events[i]);
 			}
 		}
-		generic_thread_dump_lock_overhead();
 	}

 	panic("Listener thread unexpectedly broke loop\n");

@@ -287,12 +287,6 @@ log_compiletime_config()
 	pretty_print_key_disabled("Log HTTP Parser");
 #endif

-#ifdef LOG_LOCK_OVERHEAD
-	pretty_print_key_enabled("Log Lock Overhead");
-#else
-	pretty_print_key_disabled("Log Lock Overhead");
-#endif
-
 #ifdef LOG_TENANT_LOADING
 	pretty_print_key_enabled("Log Tenant Loading");
 #else
